コード例 #1
0
def SkySeg(
        input_shape=(256, 256, 3), regularization_factor=1e-4,
        learning_rate=7e-4):
    """Build and compile an encoder-decoder sky-segmentation network.

    Args:
        input_shape: input image shape (H, W, C).
        regularization_factor: L2 weight-decay factor for every (de)conv.
        learning_rate: Adam learning rate.

    Returns:
        A compiled Keras Model producing a 2-channel per-pixel softmax,
        trained with sparse categorical cross-entropy and the IRBS metric.
    """
    l2 = keras.regularizers.l2

    def conv(filters, name):
        # Shared settings for every 3x3 encoder convolution.
        return layers.Conv2D(
            filters, 3, strides=(1, 1), name=name, padding='same',
            activation="relu", kernel_initializer='glorot_uniform',
            kernel_regularizer=l2(regularization_factor))

    def deconv(filters, name):
        # Shared settings for every 3x3 decoder transposed convolution.
        return layers.Conv2DTranspose(
            filters, 3, strides=(1, 1), name=name, padding='same',
            activation="relu", kernel_initializer='glorot_uniform',
            kernel_regularizer=l2(regularization_factor))

    input_img = layers.Input(shape=input_shape,
                             dtype='float32',
                             name="input_img")

    # Encoder: five conv-conv(-pool) stages; the pre-pool activations are
    # kept so the decoder can add them back as skip connections.
    conv_1 = conv(16, "conv1")(input_img)
    conv_2 = conv(16, "conv2")(conv_1)
    pool_1 = layers.MaxPool2D()(conv_2)
    conv_3 = conv(32, "conv3")(pool_1)
    conv_4 = conv(32, "conv4")(conv_3)
    pool_2 = layers.MaxPool2D()(conv_4)
    conv_5 = conv(64, "conv5")(pool_2)
    conv_6 = conv(64, "conv6")(conv_5)
    pool_3 = layers.MaxPool2D()(conv_6)
    conv_7 = conv(64, "conv7")(pool_3)
    conv_8 = conv(64, "conv8")(conv_7)
    pool_4 = layers.MaxPool2D()(conv_8)
    conv_9 = conv(128, "conv9")(pool_4)
    conv_10 = conv(128, "conv10")(conv_9)
    pool_5 = layers.MaxPool2D()(conv_10)

    # Fully-connected bottleneck, then reshape back to an 8x8x128 map.
    flat = layers.Flatten()(pool_5)
    fc_1 = layers.Dense(4 * 4 * 16,
                        activation='relu',
                        name="fc_encode",
                        kernel_initializer='glorot_uniform')(flat)
    fc_2 = layers.Dense(8 * 8 * 128,
                        activation='relu',
                        name="fc_decode",
                        kernel_initializer='glorot_uniform')(fc_1)
    rsp = layers.Reshape((8, 8, 128))(fc_2)

    # Decoder: upsample, two transposed convs, then an additive skip from
    # the matching encoder stage.
    up_1 = layers.UpSampling2D()(rsp)
    dcon_1 = deconv(128, "dconv1")(up_1)
    dcon_2 = deconv(128, "dconv2")(dcon_1)
    skip_1 = layers.Add()([conv_10, dcon_2])
    up_2 = layers.UpSampling2D()(skip_1)
    dcon_3 = deconv(64, "dconv3")(up_2)
    dcon_4 = deconv(64, "dconv4")(dcon_3)
    skip_2 = layers.Add()([conv_8, dcon_4])
    up_3 = layers.UpSampling2D()(skip_2)
    dcon_5 = deconv(64, "dconv5")(up_3)
    dcon_6 = deconv(64, "dconv6")(dcon_5)
    skip_3 = layers.Add()([conv_6, dcon_6])
    up_4 = layers.UpSampling2D()(skip_3)
    dcon_7 = deconv(32, "dconv7")(up_4)
    dcon_8 = deconv(32, "dconv8")(dcon_7)
    skip_4 = layers.Add()([conv_4, dcon_8])
    up_5 = layers.UpSampling2D()(skip_4)
    dcon_9 = deconv(16, "dconv9")(up_5)
    dcon_10 = deconv(16, "dconv10")(dcon_9)
    skip_5 = layers.Add()([conv_2, dcon_10])

    # Two-channel per-pixel softmax (sky / not-sky).
    output = layers.Conv2D(
        2, 3, strides=(1, 1), name="output", padding='same',
        activation="softmax", kernel_initializer='glorot_uniform',
        kernel_regularizer=l2(regularization_factor))(skip_5)

    model = keras.Model(inputs=[input_img], outputs=output)
    model.compile(optimizer=keras.optimizers.Adam(learning_rate=learning_rate),
                  loss=keras.losses.sparse_categorical_crossentropy,
                  metrics=[IRBS])

    return model
コード例 #2
0
def UNet():
    """Build a 3-level U-Net for 512x512 single-channel images.

    Returns:
        An uncompiled Keras Model mapping (512, 512, 1) inputs to a
        single-channel feature map of the same spatial size (linear
        activation on the final 1x1 conv).
    """
    concat_axis = 3  # channels-last: skip connections concatenate on channels
    inputss = Input((512, 512, 1))

    # --- contracting path: two convs per level, then 2x2 max-pooling ---
    conv1 = layers.Conv2D(32, (3, 3), activation='relu', padding='same', name='conv1_1')(inputss)
    conv1 = layers.Conv2D(32, (3, 3), activation='relu', padding='same')(conv1)
    pool1 = layers.MaxPooling2D(pool_size=(2, 2))(conv1)

    conv2 = layers.Conv2D(64, (3, 3), activation='relu', padding='same')(pool1)
    conv2 = layers.Conv2D(64, (3, 3), activation='relu', padding='same')(conv2)
    pool2 = layers.MaxPooling2D(pool_size=(2, 2))(conv2)

    conv3 = layers.Conv2D(128, (3, 3), activation='relu', padding='same')(pool2)
    conv3 = layers.Conv2D(128, (3, 3), activation='relu', padding='same')(conv3)
    pool3 = layers.MaxPooling2D(pool_size=(2, 2))(conv3)

    # --- bottleneck with dropout (no further pooling) ---
    conv4 = layers.Conv2D(256, (3, 3), activation='relu', padding='same')(pool3)
    conv4 = layers.Conv2D(256, (3, 3), activation='relu', padding='same')(conv4)
    conv4 = Dropout(0.2)(conv4)

    # --- expanding path: upsample, crop the encoder map to match, concat ---
    up_conv6 = layers.UpSampling2D(size=(2, 2))(conv4)
    ch, cw = get_crop_shape(conv3, up_conv6)
    crop_conv3 = layers.Cropping2D(cropping=(ch, cw))(conv3)
    up7 = layers.concatenate([up_conv6, crop_conv3], axis=concat_axis)
    conv7 = layers.Conv2D(128, (3, 3), activation='relu', padding='same')(up7)
    conv7 = layers.Conv2D(128, (3, 3), activation='relu', padding='same')(conv7)

    up_conv7 = layers.UpSampling2D(size=(2, 2))(conv7)
    ch, cw = get_crop_shape(conv2, up_conv7)
    crop_conv2 = layers.Cropping2D(cropping=(ch, cw))(conv2)
    up8 = layers.concatenate([up_conv7, crop_conv2], axis=concat_axis)
    conv8 = layers.Conv2D(64, (3, 3), activation='relu', padding='same')(up8)
    conv8 = layers.Conv2D(64, (3, 3), activation='relu', padding='same')(conv8)

    up_conv8 = layers.UpSampling2D(size=(2, 2))(conv8)
    ch, cw = get_crop_shape(conv1, up_conv8)
    crop_conv1 = layers.Cropping2D(cropping=(ch, cw))(conv1)
    up9 = layers.concatenate([up_conv8, crop_conv1], axis=concat_axis)
    conv9 = layers.Conv2D(32, (3, 3), activation='relu', padding='same')(up9)
    conv9 = layers.Conv2D(32, (3, 3), activation='relu', padding='same')(conv9)

    # Pad back to the exact input size before the final 1x1 projection.
    ch, cw = get_crop_shape(inputss, conv9)
    conv9 = layers.ZeroPadding2D(padding=((ch[0], ch[1]), (cw[0], cw[1])))(conv9)
    conv10 = layers.Conv2D(1, (1, 1))(conv9)

    Outmodel = Model(inputs=inputss, outputs=conv10)
    Outmodel.summary()

    return Outmodel
コード例 #3
0
def Model_PSP(pre_trained_model, num_classes=35):
    """Attach a PSPNet-style pyramid pooling head to a pretrained backbone.

    Args:
        pre_trained_model: backbone model containing a 'conv3_block4_out'
            layer (e.g. a ResNet variant).
        num_classes: number of per-pixel softmax output channels.

    Returns:
        The per-pixel softmax output tensor at 256x256 resolution.
    """
    last_pretrained_layer = pre_trained_model.get_layer('conv3_block4_out')
    last_output = last_pretrained_layer.output
    # Compress the backbone features to a fixed depth before pooling.
    last_output = layers.Conv2D(filters=128,
                                kernel_size=(1, 1),
                                name='Compress_out')(last_output)

    # Params for the pooling module.
    # Each branch's depth has to be 1/4 of the input channel depth.
    INPUT_CHANNEL_DEPTH = 128
    INPUT_DIM = 32  # assumes the backbone output is 32x32 — TODO confirm
    # Floor division: Conv2D's `filters` must be an integer; the previous
    # `/ 4` produced a float (32.0), which the layer constructor rejects.
    TARGET_CHANNEL_DEPTH = INPUT_CHANNEL_DEPTH // 4
    Y_KERNEL_DIM = (INPUT_DIM // 2, INPUT_DIM // 2)
    B_KERNEL_DIM = (INPUT_DIM // 4, INPUT_DIM // 4)
    G_KERNEL_DIM = (INPUT_DIM // 8, INPUT_DIM // 8)

    # Now we define the pyramidal pooling architecture.
    base = last_output

    # Red branch: global average pooling — a single 1x1 bin.
    red_blk = layers.GlobalAvgPool2D(name='red_block_pooling')(base)
    red_blk = layers.Reshape((1, 1, INPUT_CHANNEL_DEPTH))(red_blk)
    red_blk = layers.Conv2D(filters=TARGET_CHANNEL_DEPTH,
                            kernel_size=(1, 1),
                            name='red_1x1_conv')(red_blk)
    red_blk = layers.UpSampling2D(size=(256, 256),
                                  interpolation='bilinear',
                                  name='red_upsample')(red_blk)

    # Yellow branch: average pooling into 2x2 bins.
    y_blk = layers.AvgPool2D(pool_size=Y_KERNEL_DIM,
                             name='yellow_blk_pooling')(base)
    y_blk = layers.Conv2D(filters=TARGET_CHANNEL_DEPTH,
                          kernel_size=(1, 1),
                          name='yellow_1x1_conv')(y_blk)
    y_blk = layers.UpSampling2D(size=(128, 128),
                                interpolation='bilinear',
                                name='yellow_upsample')(y_blk)

    # Blue branch: average pooling into 4x4 bins.
    blue_blk = layers.AvgPool2D(pool_size=B_KERNEL_DIM,
                                name='blue_blk_pooling')(base)
    blue_blk = layers.Conv2D(filters=TARGET_CHANNEL_DEPTH,
                             kernel_size=(1, 1),
                             name='blue_1x1_conv')(blue_blk)
    blue_blk = layers.UpSampling2D(size=(64, 64),
                                   interpolation='bilinear',
                                   name='blue_upsample')(blue_blk)

    # Green branch: average pooling into 8x8 bins.
    green_blk = layers.AvgPool2D(pool_size=G_KERNEL_DIM,
                                 name='green_blk_pooling')(base)
    green_blk = layers.Conv2D(filters=TARGET_CHANNEL_DEPTH,
                              kernel_size=(1, 1),
                              name='green_1x1_conv')(green_blk)
    green_blk = layers.UpSampling2D(size=(32, 32),
                                    interpolation='bilinear',
                                    name='green_upsample')(green_blk)

    # Upsample the base features to the common 256x256 resolution, then
    # print shapes to verify all branches match. `.shape` (not the
    # `.get_shape` attribute, which printed a bound-method repr).
    base = layers.UpSampling2D(size=(256 // INPUT_DIM, 256 // INPUT_DIM),
                               interpolation='bilinear',
                               name='base_upsample')(base)
    print(base.shape)
    print(red_blk.shape)
    print(y_blk.shape)
    print(blue_blk.shape)
    print(green_blk.shape)

    # Fuse all scales along the channel axis and check the result.
    PPM = tf.keras.layers.concatenate(
        [base, green_blk, blue_blk, y_blk, red_blk])
    print(PPM.shape)

    # Final per-pixel classification block.
    output = layers.Conv2D(filters=num_classes,
                           kernel_size=(3, 3),
                           padding='same',
                           name='final_3x3_conv_blk',
                           activation='softmax')(PPM)
    return output
コード例 #4
0
ファイル: generator.py プロジェクト: Sanketb2312/StyleGan
def get_skip_generator(latent_dim=64,
                       channels=64,
                       target_size=64,
                       latent_style_layers=2):
    """Build a skip-connection (StyleGAN2-style) generator.

    Starts from a learnable 4x4 constant image and applies pairs of style
    blocks (one at the current resolution, one that doubles it) until
    `target_size` is reached, accumulating the RGB output by upsampling
    the running image and adding each scale's RGB projection.
    """
    num_upsamples = int(math.log2(target_size) - 2)
    side_length = 4

    # Learnable constant image: a Dense layer driven by a dummy scalar
    # input, with a zero kernel so only the bias (the "constant") trains.
    dummy_in = layers.Input(shape=(1, ), name="dummy_in")
    feat = layers.Dense(side_length * side_length * channels,
                        name="const_img",
                        kernel_initializer="zeros",
                        bias_initializer="random_normal")(dummy_in)
    feat = layers.Reshape((side_length, side_length, channels))(feat)

    # Initial RGB projection of the constant image.
    rgb = layers.Conv2D(filters=3,
                        kernel_size=(3, 3),
                        kernel_initializer="random_normal",
                        bias_initializer="zeros",
                        activation="tanh",
                        padding="same")(feat)

    # Latent (style) input shared by every style block.
    latent_in = layers.Input(shape=(latent_dim, ), name="latent_in")

    noise_inputs = []
    for _ in range(num_upsamples):
        # Style block at the current resolution (no upsampling).
        noise_in = layers.Input(shape=(side_length, side_length, 1),
                                name=f"noise_in_{side_length}x{side_length}")
        noise_inputs.append(noise_in)
        feat = style_block(feat,
                           latent_in,
                           noise_in,
                           channels=channels,
                           latent_style_layers=latent_style_layers,
                           upsample=False,
                           name=f"{side_length}x{side_length}")

        # Style block that doubles the resolution.
        side_length *= 2
        noise_in = layers.Input(
            shape=(side_length, side_length, 1),
            name=f"noise_in_upsample_{side_length}x{side_length}")
        noise_inputs.append(noise_in)
        feat = style_block(feat,
                           latent_in,
                           noise_in,
                           channels=channels,
                           latent_style_layers=latent_style_layers,
                           upsample=True,
                           name=f"upsample_{side_length}x{side_length}")

        # Project the deep features to RGB at the new resolution.
        rgb_new = layers.Conv2D(filters=3,
                                kernel_size=(3, 3),
                                kernel_initializer="random_normal",
                                bias_initializer="zeros",
                                activation="tanh",
                                padding="same",
                                name=f"to_rgb_{side_length}x{side_length}")(feat)

        # Skip connection: upsample the running RGB and add this scale's.
        rgb = layers.UpSampling2D(
            size=(2, 2),
            interpolation="bilinear",
            name=f"rgb_upsampling_{side_length}x{side_length}")(rgb)
        rgb = layers.Add(name=f"add_deep_rgb_{side_length}x{side_length}")(
            [rgb, rgb_new])

    generator = tf.keras.Model(inputs=[dummy_in, latent_in] + noise_inputs,
                               outputs=rgb,
                               name="generator")
    return generator
コード例 #5
0
    def __init__(self, img_shape, num_class, d=32, weights=weights_url):
        """Wire up a 4-level U-Net graph and initialise the parent Model.

        Args:
            img_shape: input image shape, e.g. (H, W, C).
            num_class: number of sigmoid-activated output channels.
            d: base channel width; deeper levels use multiples of it.
            weights: optional weight file loaded after construction.
        """
        channel_axis = 3  # channels-last: concat skip links on channels
        net_in = layers.Input(shape=img_shape)

        # --- contracting path: two convs per level, then 2x2 max-pool ---
        c1 = layers.Conv2D(d, (3, 3),
                           activation='relu',
                           padding='same',
                           name='conv1_1')(net_in)
        c1 = layers.Conv2D(d, (3, 3), activation='relu', padding='same')(c1)
        p1 = layers.MaxPooling2D(pool_size=(2, 2))(c1)

        c2 = layers.Conv2D(d * 2, (3, 3), activation='relu',
                           padding='same')(p1)
        c2 = layers.Conv2D(d * 2, (3, 3), activation='relu',
                           padding='same')(c2)
        p2 = layers.MaxPooling2D(pool_size=(2, 2))(c2)

        c3 = layers.Conv2D(d * 4, (3, 3), activation='relu',
                           padding='same')(p2)
        c3 = layers.Conv2D(d * 4, (3, 3), activation='relu',
                           padding='same')(c3)
        p3 = layers.MaxPooling2D(pool_size=(2, 2))(c3)

        c4 = layers.Conv2D(d * 8, (3, 3), activation='relu',
                           padding='same')(p3)
        c4 = layers.Conv2D(d * 8, (3, 3), activation='relu',
                           padding='same')(c4)
        p4 = layers.MaxPooling2D(pool_size=(2, 2))(c4)

        # --- bottleneck ---
        c5 = layers.Conv2D(d * 16, (3, 3), activation='relu',
                           padding='same')(p4)
        c5 = layers.Conv2D(d * 16, (3, 3), activation='relu',
                           padding='same')(c5)

        # --- expanding path: upsample, crop encoder map to fit, concat ---
        u6 = layers.UpSampling2D(size=(2, 2))(c5)
        crop_h, crop_w = self.get_crop_shape(c4, u6)
        c4_cropped = layers.Cropping2D(cropping=(crop_h, crop_w))(c4)
        cat6 = layers.concatenate([u6, c4_cropped], axis=channel_axis)
        c6 = layers.Conv2D(d * 8, (3, 3), activation='relu',
                           padding='same')(cat6)
        c6 = layers.Conv2D(d * 8, (3, 3), activation='relu',
                           padding='same')(c6)

        u7 = layers.UpSampling2D(size=(2, 2))(c6)
        crop_h, crop_w = self.get_crop_shape(c3, u7)
        c3_cropped = layers.Cropping2D(cropping=(crop_h, crop_w))(c3)
        cat7 = layers.concatenate([u7, c3_cropped], axis=channel_axis)
        c7 = layers.Conv2D(d * 4, (3, 3), activation='relu',
                           padding='same')(cat7)
        c7 = layers.Conv2D(d * 4, (3, 3), activation='relu',
                           padding='same')(c7)

        u8 = layers.UpSampling2D(size=(2, 2))(c7)
        crop_h, crop_w = self.get_crop_shape(c2, u8)
        c2_cropped = layers.Cropping2D(cropping=(crop_h, crop_w))(c2)
        cat8 = layers.concatenate([u8, c2_cropped], axis=channel_axis)
        c8 = layers.Conv2D(d * 2, (3, 3), activation='relu',
                           padding='same')(cat8)
        c8 = layers.Conv2D(d * 2, (3, 3), activation='relu',
                           padding='same')(c8)

        u9 = layers.UpSampling2D(size=(2, 2))(c8)
        crop_h, crop_w = self.get_crop_shape(c1, u9)
        c1_cropped = layers.Cropping2D(cropping=(crop_h, crop_w))(c1)
        cat9 = layers.concatenate([u9, c1_cropped], axis=channel_axis)
        c9 = layers.Conv2D(d, (3, 3), activation='relu',
                           padding='same')(cat9)
        c9 = layers.Conv2D(d, (3, 3), activation='relu',
                           padding='same')(c9)

        # Pad back to the exact input size before the final 1x1 conv.
        crop_h, crop_w = self.get_crop_shape(net_in, c9)
        c9 = layers.ZeroPadding2D(padding=((crop_h[0], crop_h[1]),
                                           (crop_w[0], crop_w[1])))(c9)
        out = layers.Conv2D(num_class, (1, 1), activation="sigmoid")(c9)

        super().__init__(inputs=net_in, outputs=out)

        if weights is not None:
            self.load_weight_file(weights)
コード例 #6
0
    def __init__(self,
                 num_channels,
                 use_2d=True,
                 kernel_size=2,
                 activation='relu',
                 use_attention=False,
                 use_batchnorm=False,
                 use_transpose=False,
                 use_bias=True,
                 strides=2,
                 data_format='channels_last',
                 name="upsampling_conv_block",
                 **kwargs):
        """Upsampling conv block: spatial upscaling, an optional attention
        gate, a single conv, and a two-conv refinement block.
        """
        super(Up_Conv, self).__init__(name=name)

        self.data_format = data_format
        self.use_attention = use_attention

        # Pick the upscaling layer: a learnable transposed convolution or
        # a parameter-free resize, in the 2-D or 3-D variant as requested.
        if use_transpose:
            transpose_cls = (tfkl.Conv2DTranspose
                             if use_2d else tfkl.Conv3DTranspose)
            self.upconv_layer = transpose_cls(num_channels,
                                              kernel_size,
                                              padding='same',
                                              strides=strides,
                                              data_format=self.data_format)
        else:
            resize_cls = tfkl.UpSampling2D if use_2d else tfkl.UpSampling3D
            self.upconv_layer = resize_cls(size=strides)

        # Optional attention gate for the incoming skip connection.
        if self.use_attention:
            self.attention = Attention_Gate(num_channels=num_channels,
                                            use_2d=use_2d,
                                            kernel_size=1,
                                            activation=activation,
                                            padding='same',
                                            strides=strides,
                                            use_bias=use_bias,
                                            data_format=self.data_format)

        # Single conv applied right after the upscaling step.
        self.conv = Conv_Block(num_channels=num_channels,
                               use_2d=use_2d,
                               num_conv_layers=1,
                               kernel_size=kernel_size,
                               activation=activation,
                               use_batchnorm=use_batchnorm,
                               use_dropout=False,
                               data_format=self.data_format)

        # Two-conv refinement block applied after merging.
        self.conv_block = Conv_Block(num_channels=num_channels,
                                     use_2d=use_2d,
                                     num_conv_layers=2,
                                     kernel_size=3,
                                     activation=activation,
                                     use_batchnorm=use_batchnorm,
                                     use_dropout=False,
                                     data_format=self.data_format)
コード例 #7
0
def build_mv2_hourglass_model(number_of_keypoints):
    """Build a MobileNetV2-style stacked-hourglass keypoint model.

    Args:
        number_of_keypoints: number of heatmap channels each hourglass
            stage predicts.

    Returns:
        A Keras Model with one output per hourglass stage, every stage
        output upsampled to the header's feature resolution.
    """
    hourglass_stage_num = 4
    input_shape = (192, 192, 3)  # h, w, c
    img_input = layers.Input(shape=input_shape)  # avoid shadowing `input`

    ## HEADER: strided conv (with L2 regularizer) + BN + ReLU6.
    x = layers.Conv2D(filters=16,
                      kernel_size=(3, 3),
                      strides=(2, 2),
                      padding='SAME',
                      kernel_regularizer=l2_regularizer_00004)(img_input)
    x = layers.BatchNormalization(momentum=0.999)(x)
    x = layers.ReLU(max_value=6)(x)

    # 128, 112: two channel-preserving bottlenecks.
    for _ in range(2):
        x = _inverted_bottleneck(x,
                                 up_channel_rate=1,
                                 channels=16,
                                 is_subsample=False,
                                 kernel_size=3)

    # 64, 56: one subsampling bottleneck, then four refinement blocks.
    x = _inverted_bottleneck(x,
                             up_channel_rate=6,
                             channels=24,
                             is_subsample=True,
                             kernel_size=3)
    for _ in range(4):
        x = _inverted_bottleneck(x,
                                 up_channel_rate=6,
                                 channels=24,
                                 is_subsample=False,
                                 kernel_size=3)

    # Remember the header's output resolution; all stage outputs are
    # upsampled back to it below.
    captured_h, captured_w = int(x.shape[1]), int(x.shape[2])

    # HOURGLASS: recursive module returning per-stage intermediate outputs.
    x, middle_output_layers = _hourglass_module(
        x,
        stage_index=hourglass_stage_num,
        number_of_keypoints=number_of_keypoints)

    # Bilinearly upsample any stage output that is smaller than the
    # header resolution so all model outputs share one spatial size.
    for layer_index, middle_layer in enumerate(middle_output_layers):
        h, w = middle_layer.shape[1], middle_layer.shape[2]
        if h == captured_h and w == captured_w:
            continue
        upsampling_size = (captured_h // h, captured_w // w)
        middle_output_layers[layer_index] = layers.UpSampling2D(
            size=upsampling_size, interpolation='bilinear')(middle_layer)

    model = models.Model(img_input, outputs=middle_output_layers)
    return model
コード例 #8
0
 def UpSampling(x, nfilter):
     """2x nearest-neighbour upsampling, 1x1 channel projection, batch norm."""
     # Maybe should test bilinear interpolation as an alternative.
     upsampled = KL.UpSampling2D(interpolation="nearest")(x)
     projected = KL.Conv2D(nfilter, (1, 1))(upsampled)
     return KL.BatchNormalization()(projected)
コード例 #9
0
ファイル: model.py プロジェクト: zhjpqq/SAPD
def build_BiFPN(features, num_channels, id, freeze_bn=False):
    """One BiFPN cell: lateral projections, top-down fusion, bottom-up fusion.

    Args:
        features: for the first cell (id == 0) the backbone feature list
            whose last three entries are C3, C4, C5; afterwards the five
            P3..P7 outputs of the previous cell.
        num_channels: channel count used throughout the cell.
        id: index of this cell (selects the input handling and layer names).
        freeze_bn: forwarded to the conv blocks.

    Returns:
        Tuple of the five fused feature maps (P3_out, ..., P7_out).
    """
    if id == 0:
        # first cell: project C3..C5 with 1x1 convs and derive P6/P7 by
        # strided 3x3 convs from C5 and P6 respectively
        _, _, C3, C4, C5 = features
        P3_in = ConvBlock(num_channels, kernel_size=1, strides=1,
                          freeze_bn=freeze_bn, name='BiFPN_{}_P3'.format(id))(C3)
        P4_in = ConvBlock(num_channels, kernel_size=1, strides=1,
                          freeze_bn=freeze_bn, name='BiFPN_{}_P4'.format(id))(C4)
        P5_in = ConvBlock(num_channels, kernel_size=1, strides=1,
                          freeze_bn=freeze_bn, name='BiFPN_{}_P5'.format(id))(C5)
        P6_in = ConvBlock(num_channels, kernel_size=3, strides=2,
                          freeze_bn=freeze_bn, name='BiFPN_{}_P6'.format(id))(C5)
        P7_in = ConvBlock(num_channels, kernel_size=3, strides=2,
                          freeze_bn=freeze_bn, name='BiFPN_{}_P7'.format(id))(P6_in)
        laterals = [P3_in, P4_in, P5_in, P6_in, P7_in]
    else:
        # later cells: re-project every previous output with a 1x1 conv
        laterals = [
            ConvBlock(num_channels, kernel_size=1, strides=1,
                      freeze_bn=freeze_bn,
                      name='BiFPN_{}_P{}'.format(id, level))(feature)
            for level, feature in zip(range(3, 8), features)
        ]

    # top-down pathway (P7 -> P3): upsample the level above and fuse it
    # with the lateral input at this level
    td = [None] * 5
    td[4] = laterals[4]
    for idx in range(3, -1, -1):
        upsampled = layers.UpSampling2D()(td[idx + 1])
        fused = layers.Add()([upsampled, laterals[idx]])
        td[idx] = DepthwiseSeparableConvBlock(
            num_channels, kernel_size=3, strides=1, freeze_bn=freeze_bn,
            name='BiFPN_{}_U_P{}'.format(id, idx + 3))(fused)

    # bottom-up pathway (P3 -> P7): downsample the level below and fuse it
    # with the top-down node and the lateral input
    outputs = [td[0]]
    for idx in range(1, 5):
        downsampled = layers.MaxPooling2D(strides=(2, 2))(outputs[-1])
        if idx < 4:
            fused = layers.Add()([downsampled, td[idx], laterals[idx]])
        else:
            # the top level has no separate top-down node
            fused = layers.Add()([downsampled, laterals[idx]])
        outputs.append(DepthwiseSeparableConvBlock(
            num_channels, kernel_size=3, strides=1, freeze_bn=freeze_bn,
            name='BiFPN_{}_D_P{}'.format(id, idx + 3))(fused))

    return tuple(outputs)
コード例 #10
0
ファイル: unet.py プロジェクト: carnotresearch/cr-vision
def cnn_custom_unet(inputs,
                    filters=16,
                    num_layers=4,
                    activation='relu',
                    use_batch_norm=True,
                    use_bias=None,
                    upsample_mode='deconv',
                    dropout=0.3,
                    dropout_change_per_layer=0,
                    dropout_type='spatial',
                    use_dropout_on_upsampling=False):
    """Build a configurable U-Net on top of *inputs* and return its output.

    Args:
        inputs: input tensor the network is built on.
        filters: filter count of the first contracting block; doubled after
            every pooling step and halved per expanding stage.
        num_layers: number of contracting / expanding stages.
        activation, use_batch_norm, use_bias, dropout_type: forwarded to
            ``unet_conv_block``.
        upsample_mode: ``'deconv'`` uses ``Conv2DTranspose``; any other
            value uses ``UpSampling2D``.
        dropout: dropout rate of the first block.
        dropout_change_per_layer: added per contracting stage, subtracted
            per expanding stage.
        use_dropout_on_upsampling: if False, dropout is disabled on the
            expanding path.

    Returns:
        The output tensor of the last expanding block.
    """
    # outputs of the contracting blocks, saved for the skip connections
    down_blocks = []

    # contracting path
    net = inputs
    for i in range(num_layers):
        name = f'contract_{i+1}'
        net = unet_conv_block(inputs=net,
                              filters=filters,
                              padding='same',
                              activation=activation,
                              use_batch_norm=use_batch_norm,
                              use_bias=use_bias,
                              dropout=dropout,
                              dropout_type=dropout_type,
                              name=name)
        # remember this resolution for the matching expanding stage
        down_blocks.append(net)
        net = layers.MaxPooling2D((2, 2), strides=2, name=f'{name}_pool')(net)
        filters = filters * 2
        dropout = dropout + dropout_change_per_layer

    # bottleneck block between the two paths
    net = unet_conv_block(inputs=net,
                          filters=filters,
                          padding='same',
                          activation=activation,
                          use_batch_norm=use_batch_norm,
                          use_bias=use_bias,
                          dropout=dropout,
                          dropout_type=dropout_type,
                          name='lateral')

    if not use_dropout_on_upsampling:
        # disable dropout for the expansion part of the network
        dropout = 0
        dropout_change_per_layer = 0

    # expanding path
    i = num_layers
    for conv in reversed(down_blocks):
        filters = filters // 2
        name = f'expand_{i}'
        dropout -= dropout_change_per_layer
        if upsample_mode == 'deconv':
            net = layers.Conv2DTranspose(filters, (2, 2),
                                         strides=(2, 2),
                                         padding='same',
                                         name=f'{name}_upconv')(net)
        else:
            # BUG FIX: UpSampling2D takes `size`, not `strides` — the
            # original keyword argument raised a TypeError at build time.
            net = layers.UpSampling2D(size=(2, 2),
                                      name=f'{name}_upconv')(net)
        # cropping is not needed due to same padding
        # TODO attention concatenation
        net = layers.concatenate([net, conv], name=f'{name}_concatenate')
        net = unet_conv_block(inputs=net,
                              filters=filters,
                              padding='same',
                              activation=activation,
                              use_batch_norm=use_batch_norm,
                              use_bias=use_bias,
                              dropout=dropout,
                              dropout_type=dropout_type,
                              name=name)
        i = i - 1
    return net
コード例 #11
0
    def generator_model():
        """Build an encoder-decoder generator for 128x128 RGB images.

        The encoder halves the spatial size with stride-2 convolutions
        (doubling filters up to ``max_nodes``) until the map is 1x1; the
        decoder upsamples back, concatenating the matching encoder output
        at each resolution, and ends with a tanh RGB output.
        """
        start_imagesize = 128
        imagesize = 128
        nodes = 64
        max_nodes = 512
        max_count = 0  # how many encoder levels were capped at max_nodes
        stride = 2
        bn_layer_list = []  # encoder outputs kept for skip connections

        # stem: first stride-2 conv block
        input_layer = Input(shape=(imagesize, imagesize, 3))
        con2d_layer = layers.Conv2D(nodes, 3, stride, padding='same')(input_layer)
        hidden_layer = layers.PReLU()(con2d_layer)
        bn_layer_list.append(layers.BatchNormalization()(hidden_layer))
        imagesize = math.ceil(imagesize / stride)

        # encoder: keep halving until the feature map collapses to 1x1
        while imagesize != 1:
            if nodes < max_nodes:
                nodes *= 2
            else:
                max_count += 1
            con2d_layer = layers.Conv2D(nodes, 3, stride, padding='same')(bn_layer_list[-1])
            hidden_layer = layers.PReLU()(con2d_layer)
            bn_layer_list.append(layers.BatchNormalization()(hidden_layer))
            imagesize = math.ceil(imagesize / stride)

        # decoder: upsample back up to start_imagesize / 2, fusing skips
        bn_layer = None  # set on the first iteration (the 1x1 bottleneck)
        while imagesize != start_imagesize / 2:
            if imagesize != 1:
                # fuse the previous decoder output with the encoder skip
                input_data = layers.concatenate([bn_layer, bn_layer_list.pop()])
            else:
                # bottleneck: only the deepest encoder output is available
                input_data = bn_layer_list.pop()
            upsample_layer = layers.UpSampling2D((4, 4))(input_data)
            # x4 upsampling followed by a stride-2 conv is a net x2 gain
            con2d_layer = layers.Conv2D(nodes, 3, stride, padding='same')(upsample_layer)
            hidden_layer = layers.PReLU()(con2d_layer)
            bn_layer = layers.BatchNormalization()(hidden_layer)
            imagesize = math.ceil(imagesize * stride)
            if max_count != 1:
                max_count -= 1
            else:
                # BUG FIX: floor division keeps the filter count an integer
                # (Conv2D rejects float filter counts); `/=` made it float.
                nodes //= 2

        # final x2 upsample back to full resolution and tanh RGB output
        upsample_layer = layers.UpSampling2D((4, 4))(bn_layer)
        output_layer = layers.Conv2D(3, 3, stride, padding='same',
                                     activation='tanh')(upsample_layer)

        return Model(input_layer, output_layer)
コード例 #12
0
def upsample_simple(filters, kernel_size, strides, padding):
    """Factory returning a plain ``UpSampling2D`` layer.

    The signature mirrors a conv-based upsample factory so the two are
    interchangeable; only *strides* (used as the size factor) is consumed,
    the remaining arguments are accepted for interface compatibility.
    """
    return layers.UpSampling2D(size=strides)
コード例 #13
0
# two 3x3 refinement convolutions on the last decoder feature map (u9,
# defined earlier in the file)
c9 = layers.Conv2D(8, (3, 3),
                   kernel_initializer='he_uniform',
                   bias_initializer='zeros',
                   activation='relu',
                   padding='same')(u9)
c9 = layers.Conv2D(8, (3, 3),
                   kernel_initializer='he_uniform',
                   bias_initializer='zeros',
                   activation='relu',
                   padding='same')(c9)

# 1x1 conv to a single-channel sigmoid mask
d = layers.Conv2D(1, (1, 1), activation='sigmoid')(c9)
# crop then re-pad the border: forces EDGE_CROP pixels at each edge to zero
d = layers.Cropping2D((EDGE_CROP, EDGE_CROP))(d)
d = layers.ZeroPadding2D((EDGE_CROP, EDGE_CROP))(d)
if NET_SCALING is not None:
    # undo the input downscaling so the mask matches the full resolution
    d = layers.UpSampling2D(NET_SCALING)(d)

seg_model = models.Model(inputs=[input_img], outputs=[d])
print()
#print()
#print(seg_model.summary())
print()
## evaluation criteria
# dice coefficient
def dice_coef(y_true, y_pred, smooth=1):
    intersection = K.sum(y_true * y_pred, axis=[1, 2, 3])
    union = K.sum(y_true, axis=[1, 2, 3]) + K.sum(y_pred, axis=[1, 2, 3])
    return K.mean((2. * intersection + smooth) / (union + smooth), axis=0)
コード例 #14
0
ファイル: stage1_wgangp.py プロジェクト: 245charan/Pixir
    def _build_generator(self):
        """Build the stage-1 generator and store it in ``self.generator``.

        Maps (text embedding, noise) to a 64x64 RGB image in [-1, 1] plus
        the (mean, log sigma) tensor used for the conditioning loss.
        """
        # conditioning augmentation: embedding -> (mean, log sigma) -> c
        embedding = layers.Input(shape=(self.embedding_dim, ))
        x = layers.Dense(256)(embedding)
        mean_logsigma = layers.LeakyReLU(0.2)(x)
        c = layers.Lambda(generate_c)(mean_logsigma)

        z_noise = layers.Input(shape=(100, ))
        gen_input = layers.Concatenate(axis=1)([c, z_noise])

        # project and reshape to a 4x4 feature map
        x = layers.Dense(128 * 8 * 4 * 4, use_bias=False)(gen_input)
        x = layers.Reshape((4, 4, 128 * 8), input_shape=(128 * 8 * 4 * 4, ))(x)
        x = layers.BatchNormalization()(x)
        x = layers.ReLU()(x)

        # four identical upsample stages: 4 -> 8 -> 16 -> 32 -> 64 pixels,
        # each a x2 nearest-neighbour upsample + 3x3 conv + BN + ReLU
        for n_filters in (512, 256, 128, 64):
            x = layers.UpSampling2D(size=(2, 2))(x)
            x = layers.Conv2D(n_filters,
                              kernel_size=3,
                              padding='same',
                              strides=1,
                              use_bias=False)(x)
            x = layers.BatchNormalization()(x)
            x = layers.ReLU()(x)

        # final 1x1 projection to RGB in [-1, 1]
        x = layers.Conv2D(3,
                          kernel_size=1,
                          padding='same',
                          strides=1,
                          use_bias=False)(x)
        x = layers.Activation(activation='tanh')(x)

        self.generator = Model(inputs=[embedding, z_noise],
                               outputs=[x, mean_logsigma])
# hierarchical encoder: full-resolution Inception features
x0 = fun.Inception(x, 64, name='x0')(x)

# pool down to the sub-cell resolution
x1 = L.MaxPool2D(pool_size=(SIZE_SUB, SIZE_SUB),
                 strides=(SIZE_SUB, SIZE_SUB),
                 padding='valid')(x0)
x1 = fun.Inception(x1, 128, name='x1')(x1)

# pool down again to the top (coarsest) resolution
x2 = L.MaxPool2D(pool_size=(SIZE_TOP, SIZE_TOP),
                 strides=(SIZE_TOP, SIZE_TOP),
                 padding='valid')(x1)
xg = x2

# dense transform at the coarsest level
xg = L.Dense(256, activation='relu', name='x2')(xg)

# decoder: upsample coarse features and fuse with the encoder skips
y2 = xg
y1 = L.UpSampling2D(size=(SIZE_TOP, SIZE_TOP))(y2)
y1 = L.Concatenate()([x1, y1])
y1 = fun.Inception(y1, 128, name='y1')(y1)

y0 = L.UpSampling2D(size=(SIZE_SUB, SIZE_SUB))(y1)
y0 = L.Concatenate()([x0, y0])
y0 = fun.Inception(y0, 64, name='y0')(y0)

# per-pixel 50-way class scores
y = L.Dense(50, activation='softmax')(y0)

# mask out invalid pixels and append the complement channel
# NOTE(review): the multiply uses `mask1` while the complement is built
# from `mask` — confirm these are meant to be different tensors.
segment_scores = y
segment_scores = L.Multiply()([segment_scores, mask1])
not_mask = L.Lambda(lambda t: 1 - t)(mask)

segment_scores = L.Concatenate(name="segment_out")([segment_scores, not_mask])
model = keras.Model(inputs=inputs1, outputs=[segment_scores])
コード例 #16
0
    def __init__(self,
                 color=False,
                 burst_length=8,
                 blind_est=False,
                 sep_conv=False,
                 kernel_size=[5],
                 channel_att=False,
                 spatial_att=False,
                 core_bias=False,
                 use_bias=True):
        """Kernel Prediction Network encoder-decoder.

        Notes (translated from the original comments):
        - `kernel_size` is the same filter-size list used by kernel
          prediction networks.
        - with `blind_est` no noise prior is fed at the input; otherwise
          one input channel is reserved for the noise estimate.
        - `burst_length` is the temporal length of the burst, as in video.
        """
        super(KPN, self).__init__()
        self.burst_length = burst_length
        self.core_bias = core_bias
        self.use_bias = use_bias
        self.color_channel = 3 if color else 1

        # channels the prediction head must output: one kernel (or a
        # separable pair) per colour channel and per burst frame
        per_pixel = 3 if color else 1
        if sep_conv:
            core_elems = 2 * sum(kernel_size)
        else:
            core_elems = np.sum(np.array(kernel_size) ** 2)
        out_channel = per_pixel * core_elems * burst_length
        if core_bias:
            # one extra bias value per colour channel and frame
            out_channel += per_pixel * burst_length

        # encoder (note: pooling/upsampling layers hold no parameters and
        # could equally be applied functionally inside __call__)
        self.conv1 = Basic(64, channel_att=False, spatial_att=False,
                           use_bias=self.use_bias)
        self.avgpool1 = layers.AveragePooling2D(pool_size=(2, 2))
        self.conv2 = Basic(128, channel_att=False, spatial_att=False,
                           use_bias=self.use_bias)
        self.avgpool2 = layers.AveragePooling2D(pool_size=(2, 2))
        self.conv3 = Basic(256, channel_att=False, spatial_att=False,
                           use_bias=self.use_bias)
        self.avgpool3 = layers.AveragePooling2D(pool_size=(2, 2))
        self.conv4 = Basic(256, channel_att=False, spatial_att=False,
                           use_bias=self.use_bias)

        # decoder
        self.up4 = layers.UpSampling2D(size=(2, 2), interpolation='bilinear')
        self.conv5 = Basic(256, channel_att=channel_att,
                           spatial_att=spatial_att, use_bias=self.use_bias)
        self.up5 = layers.UpSampling2D(size=(2, 2), interpolation='bilinear')
        self.conv6 = Basic(128, channel_att=channel_att,
                           spatial_att=spatial_att, use_bias=self.use_bias)
        self.up6 = layers.UpSampling2D(size=(2, 2), interpolation='bilinear')
        self.conv7 = Basic(64, channel_att=channel_att,
                           spatial_att=spatial_att, use_bias=self.use_bias)
        self.outc = Basic(out_channel, channel_att=channel_att,
                          spatial_att=spatial_att, use_bias=self.use_bias)

        self.kernel_pred = KernelConv(kernel_size, sep_conv, self.core_bias)
コード例 #17
0
                            in_resized[i] * 255, o * 255,
                            out_sample_images[i] * 255
                        ],
                                       axis=1)) for i, o in enumerate(preds)
                ]
            },
            commit=False)


# simple super-resolution network: one conv stem at the input resolution,
# then three x2 upsampling stages (x8 total), each followed by a 3x3 conv
model = Sequential()
model.add(
    layers.Conv2D(3, (3, 3),
                  activation='relu',
                  padding='same',
                  input_shape=(config.input_width, config.input_height, 3)))
model.add(layers.UpSampling2D())
model.add(layers.Conv2D(3, (3, 3), activation='relu', padding='same'))
model.add(layers.UpSampling2D())
model.add(layers.Conv2D(3, (3, 3), activation='relu', padding='same'))
model.add(layers.UpSampling2D())
model.add(layers.Conv2D(3, (3, 3), activation='relu', padding='same'))

# DONT ALTER metrics=[perceptual_distance]
model.compile(optimizer='adam', loss='mse', metrics=[perceptual_distance])

# NOTE(review): `fit_generator` is deprecated in TF2 in favour of `fit`,
# which accepts generators directly — consider migrating when upgrading.
model.fit_generator(image_generator(config.batch_size, train_dir),
                    steps_per_epoch=config.steps_per_epoch,
                    epochs=config.num_epochs,
                    callbacks=[ImageLogger(), WandbCallback()],
                    validation_steps=config.val_steps_per_epoch,
                    validation_data=val_generator)
コード例 #18
0
# but also reusing its weights

# encoder: four conv layers with one pooling step, collapsed to a 16-d code
encoder_input = keras.Input(shape=(28, 28, 1), name='original_img')
x = layers.Conv2D(16, 3, activation='relu')(encoder_input)
x = layers.Conv2D(32, 3, activation='relu')(x)
x = layers.MaxPooling2D(3)(x)
x = layers.Conv2D(32, 3, activation='relu')(x)
x = layers.Conv2D(16, 3, activation='relu')(x)
encoder_output = layers.GlobalMaxPooling2D()(x)

encoder = keras.Model(encoder_input, encoder_output, name='encoder')
encoder.summary()

# decoder: reshape the 16-d code to a 4x4 map and transpose-convolve back up
decoder_input = keras.Input(shape=(16,), name='encoded_img')
x = layers.Reshape((4, 4, 1))(decoder_input)
x = layers.Conv2DTranspose(16, 3, activation='relu')(x)
x = layers.Conv2DTranspose(32, 3, activation='relu')(x)
x = layers.UpSampling2D(3)(x)
x = layers.Conv2DTranspose(16, 3, activation='relu')(x)
decoder_output = layers.Conv2DTranspose(1, 3, activation='relu')(x)

decoder = keras.Model(decoder_input, decoder_output, name='decoder')
decoder.summary()

# autoencoder: chain the two models so their weights are shared/reused
autoencoder_input = keras.Input(shape=(28, 28, 1), name='img')
encoded_img = encoder(autoencoder_input)
decoded_img = decoder(encoded_img)
autoencoder = keras.Model(autoencoder_input, decoded_img, name='autoencoder')
autoencoder.summary()

コード例 #19
0
def call(self, x):
    # Global-average-pool x, then upsample the pooled map back up.
    # NOTE(review): `x.size()` is the PyTorch tensor accessor while
    # `layers.UpSampling2D` is Keras — confirm what tensor type `x`
    # actually is here; Keras tensors expose `.shape`, not `.size()`.
    size = x.size()[2:]
    pool = self.gap(x)
    # NOTE(review): UpSampling2D's first argument is an integer scale
    # *factor*, not a target size — passing spatial dims here looks
    # suspect; verify against the intended output resolution.
    out = layers.UpSampling2D(size, interpolation='bilinear')(pool)
    return out
コード例 #20
0
    def __init__(self, n_input_channels, n_output_channels, n_filters):
        """Assemble a standard 4-level U-Net and store it in ``self.model``.

        Args:
            n_input_channels: channel count of the (variable-size) input.
            n_output_channels: number of softmax output classes.
            n_filters: base filter count, doubled at every encoder level.
        """
        def conv_bn(tensor, filters, ksize=3):
            # one conv (relu, same padding, Glorot-normal init) + batch norm
            tensor = layers.Conv2D(filters,
                                   ksize,
                                   activation='relu',
                                   padding='same',
                                   kernel_initializer=GlorotNormal())(tensor)
            return layers.BatchNormalization()(tensor)

        def double_conv(tensor, filters):
            # the classic U-Net double conv block
            return conv_bn(conv_bn(tensor, filters), filters)

        def up_merge(tensor, skip, filters, ksize=3):
            # x2 upsample, conv, then concatenate with the encoder skip
            tensor = layers.UpSampling2D(size=(2, 2))(tensor)
            tensor = layers.Conv2D(filters,
                                   ksize,
                                   activation='relu',
                                   padding='same',
                                   kernel_initializer=GlorotNormal())(tensor)
            return layers.Concatenate(axis=-1)([skip, tensor])

        inputs = layers.Input((None, None, n_input_channels))

        # contracting path
        conv1 = double_conv(inputs, n_filters)
        pool1 = layers.MaxPooling2D(pool_size=(2, 2))(conv1)
        conv2 = double_conv(pool1, n_filters * 2)
        pool2 = layers.MaxPooling2D(pool_size=(2, 2))(conv2)
        conv3 = double_conv(pool2, n_filters * 4)
        pool3 = layers.MaxPooling2D(pool_size=(2, 2))(conv3)
        conv4 = double_conv(pool3, n_filters * 8)
        drop4 = layers.Dropout(0.5)(conv4)
        pool4 = layers.MaxPooling2D(pool_size=(2, 2))(drop4)

        # bottleneck
        conv5 = double_conv(pool4, n_filters * 16)
        drop5 = layers.Dropout(0.5)(conv5)

        # expanding path (skips use the pre-dropout encoder outputs;
        # the last upsample conv uses a 2x2 kernel as in the original)
        conv6 = double_conv(up_merge(drop5, conv4, n_filters * 8),
                            n_filters * 8)
        conv7 = double_conv(up_merge(conv6, conv3, n_filters * 4),
                            n_filters * 4)
        conv8 = double_conv(up_merge(conv7, conv2, n_filters * 2),
                            n_filters * 2)
        conv9 = double_conv(up_merge(conv8, conv1, n_filters, ksize=2),
                            n_filters)

        # head: 2-channel feature conv, then per-pixel softmax
        conv9 = conv_bn(conv9, 2)
        conv10 = layers.Conv2D(n_output_channels, 1,
                               activation='softmax')(conv9)

        self.model = tf.keras.Model(inputs=inputs, outputs=conv10)
コード例 #21
0
def _hourglass_module(input, stage_index, number_of_keypoints):
    """Recursive hourglass stage built from inverted bottleneck blocks.

    Each stage downsamples, runs five bottlenecks, recurses one stage
    deeper, upsamples, and adds a skip branch computed from the stage
    input. Returns the stage output and the list of per-stage outputs
    collected on the way back up (deepest first).
    """
    if stage_index == 0:
        # recursion base: a single bottleneck, no collected outputs
        return _inverted_bottleneck(input,
                                    up_channel_rate=6,
                                    channels=24,
                                    is_subsample=False,
                                    kernel_size=3), []

    # down sample
    x = layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2),
                         padding='SAME')(input)

    # block front: five identical bottlenecks
    for _ in range(5):
        x = _inverted_bottleneck(x,
                                 up_channel_rate=6,
                                 channels=24,
                                 is_subsample=False,
                                 kernel_size=3)

    # block middle: recurse into the next (shallower) stage
    x, middle_layers = _hourglass_module(
        x,
        stage_index=stage_index - 1,
        number_of_keypoints=number_of_keypoints)

    # block back: project to the keypoint channel count
    x = _inverted_bottleneck(x,
                             up_channel_rate=6,
                             channels=number_of_keypoints,
                             is_subsample=False,
                             kernel_size=3)

    # up sample back to this stage's resolution
    upsampling_layer = layers.UpSampling2D(size=(2, 2),
                                           interpolation='bilinear')(x)

    # jump (skip) branch from the stage input: four bottlenecks at 24
    # channels, then one projecting to the keypoint channel count
    x = input
    for _ in range(4):
        x = _inverted_bottleneck(x,
                                 up_channel_rate=6,
                                 channels=24,
                                 is_subsample=False,
                                 kernel_size=3)
    jump_branch_layer = _inverted_bottleneck(x,
                                             up_channel_rate=6,
                                             channels=number_of_keypoints,
                                             is_subsample=False,
                                             kernel_size=3)

    # merge the two branches and record this stage's output
    x = upsampling_layer + jump_branch_layer
    middle_layers.append(x)

    return x, middle_layers
コード例 #22
0
def UNet(input_size=(224, 224, 3), num_classes=1):
    """Build a standard U-Net segmentation model.

    Args:
        input_size: Input image shape (height, width, channels).
        num_classes: Number of output channels; >1 uses softmax, 1 uses
            sigmoid on the final 1x1 convolution.

    Returns:
        A Keras ``Model`` mapping images to per-pixel class maps.
    """

    def _double_conv(tensor, filters):
        # Two stacked 3x3 ReLU convolutions, as in the original U-Net paper.
        for _ in range(2):
            tensor = layers.Conv2D(filters,
                                   3,
                                   activation="relu",
                                   padding="same",
                                   kernel_initializer="he_normal")(tensor)
        return tensor

    def _up_concat(tensor, skip, filters):
        # 2x upsample, halve channels with a 2x2 conv, then merge the skip.
        tensor = layers.Conv2D(filters,
                               2,
                               activation="relu",
                               padding="same",
                               kernel_initializer="he_normal")(
                                   layers.UpSampling2D(size=(2, 2))(tensor))
        return layers.concatenate([skip, tensor], axis=3)

    inputs = layers.Input(input_size)

    # Encoder: four down-sampling stages with doubling channel counts.
    conv1 = _double_conv(inputs, 64)
    pool1 = layers.MaxPooling2D(pool_size=(2, 2))(conv1)
    conv2 = _double_conv(pool1, 128)
    pool2 = layers.MaxPooling2D(pool_size=(2, 2))(conv2)
    conv3 = _double_conv(pool2, 256)
    pool3 = layers.MaxPooling2D(pool_size=(2, 2))(conv3)
    conv4 = _double_conv(pool3, 512)
    drop4 = layers.Dropout(0.5)(conv4)
    pool4 = layers.MaxPooling2D(pool_size=(2, 2))(drop4)

    # Bottleneck, regularized with dropout.
    drop5 = layers.Dropout(0.5)(_double_conv(pool4, 1024))

    # Decoder: mirror the encoder, concatenating the skip connections.
    up = _double_conv(_up_concat(drop5, drop4, 512), 512)
    up = _double_conv(_up_concat(up, conv3, 256), 256)
    up = _double_conv(_up_concat(up, conv2, 128), 128)
    up = _double_conv(_up_concat(up, conv1, 64), 64)
    up = layers.Conv2D(2,
                       3,
                       activation="relu",
                       padding="same",
                       kernel_initializer="he_normal")(up)

    # Per-pixel classification head.
    final_activation = "softmax" if num_classes > 1 else "sigmoid"
    outputs = layers.Conv2D(num_classes, 1, activation=final_activation)(up)

    return models.Model(inputs=inputs, outputs=outputs)
コード例 #23
0
ファイル: unet_gpm_z.py プロジェクト: ANU-WALD/pluvi_pondus
def Unet():
    """Build a two-input U-Net (precipitation downscaling style).

    ``ref_input`` (1024x1024x2) is encoded for two levels before the
    auxiliary ``z_input`` (256x256x3) is concatenated into the feature
    stream at the matching 256x256 resolution.  The decoder upsamples
    three times from the 32x32 bottleneck, so the output is 512x512x1
    (half the reference-input resolution), rectified with ReLU.

    Returns:
        A Keras ``Model`` with inputs ``[ref_input, z_input]``.
    """
    concat_axis = 3
    ref_input = layers.Input(shape = (1024, 1024, 2))
    z_input = layers.Input(shape = (256, 256, 3))

    feats = 16
    # Encoder level 1: 1024 -> 512
    bn0 = layers.BatchNormalization(axis=3)(ref_input)
    conv1 = layers.Conv2D(feats, (3, 3), activation='relu', padding='same', name='conv1_1')(bn0)
    bn1 = layers.BatchNormalization(axis=3)(conv1)
    conv1 = layers.Conv2D(feats, (3, 3), activation='relu', padding='same')(bn1)
    bn2 = layers.BatchNormalization(axis=3)(conv1)
    pool1 = layers.MaxPooling2D(pool_size=(2, 2))(bn2)
    # Encoder level 2: 512 -> 256
    conv2 = layers.Conv2D(2*feats, (3, 3), activation='relu', padding='same')(pool1)
    bn3 = layers.BatchNormalization(axis=3)(conv2)
    conv2 = layers.Conv2D(2*feats, (3, 3), activation='relu', padding='same')(bn3)
    bn4 = layers.BatchNormalization(axis=3)(conv2)
    pool2 = layers.MaxPooling2D(pool_size=(2, 2))(bn4)

    # Inject the auxiliary input at its native 256x256 resolution.
    zadd = layers.concatenate([z_input, pool2], axis=concat_axis)
    nzadd = layers.BatchNormalization(axis=3)(zadd)

    # Encoder level 3: 256 -> 128
    conv3 = layers.Conv2D(4*feats, (3, 3), activation='relu', padding='same')(nzadd)
    bn5 = layers.BatchNormalization(axis=3)(conv3)
    conv3 = layers.Conv2D(4*feats, (3, 3), activation='relu', padding='same')(bn5)
    bn6 = layers.BatchNormalization(axis=3)(conv3)
    pool3 = layers.MaxPooling2D(pool_size=(2, 2))(bn6)

    # Encoder level 4: 128 -> 64
    conv4 = layers.Conv2D(8*feats, (3, 3), activation='relu', padding='same')(pool3)
    bn7 = layers.BatchNormalization(axis=3)(conv4)
    conv4 = layers.Conv2D(8*feats, (3, 3), activation='relu', padding='same')(bn7)
    bn8 = layers.BatchNormalization(axis=3)(conv4)
    pool4 = layers.MaxPooling2D(pool_size=(2, 2))(bn8)

    # Encoder level 5: 64 -> 32
    conv5 = layers.Conv2D(16*feats, (3, 3), activation='relu', padding='same')(pool4)
    bn9 = layers.BatchNormalization(axis=3)(conv5)
    conv5 = layers.Conv2D(16*feats, (3, 3), activation='relu', padding='same')(bn9)
    bn10 = layers.BatchNormalization(axis=3)(conv5)
    pool5 = layers.MaxPooling2D(pool_size=(2, 2))(bn10)

    # Bottleneck at 32x32.
    conv6 = layers.Conv2D(32*feats, (3, 3), activation='relu', padding='same')(pool5)
    bn11 = layers.BatchNormalization(axis=3)(conv6)
    conv6 = layers.Conv2D(32*feats, (3, 3), activation='relu', padding='same')(bn11)
    bn12 = layers.BatchNormalization(axis=3)(conv6)

    # Decoder level 1: 32 -> 64, skip connection from conv5.
    up_conv6 = layers.UpSampling2D(size=(2, 2))(bn12)
    up7 = layers.concatenate([up_conv6, conv5], axis=concat_axis)

    conv7 = layers.Conv2D(16*feats, (3, 3), activation='relu', padding='same')(up7)
    # FIX: these two BatchNorm layers previously normalized `conv6`, and the
    # second conv consumed `bn12`, leaving this decoder stage dangling.
    bn13 = layers.BatchNormalization(axis=3)(conv7)
    conv7 = layers.Conv2D(16*feats, (3, 3), activation='relu', padding='same')(bn13)
    bn14 = layers.BatchNormalization(axis=3)(conv7)

    # Decoder level 2: 64 -> 128, skip connection from conv4.
    # FIX: previously upsampled the encoder tensor `bn10`, which bypassed the
    # bottleneck and disconnected conv6/conv7 from the output graph entirely.
    up_conv5 = layers.UpSampling2D(size=(2, 2))(bn14)
    up6 = layers.concatenate([up_conv5, conv4], axis=concat_axis)

    conv6 = layers.Conv2D(8*feats, (3, 3), activation='relu', padding='same')(up6)
    bn15 = layers.BatchNormalization(axis=3)(conv6)
    conv6 = layers.Conv2D(8*feats, (3, 3), activation='relu', padding='same')(bn15)
    bn16 = layers.BatchNormalization(axis=3)(conv6)

    # Decoder level 3: 128 -> 256, skip connection from conv3.
    up_conv6 = layers.UpSampling2D(size=(2, 2))(bn16)
    up7 = layers.concatenate([up_conv6, conv3], axis=concat_axis)
    conv7 = layers.Conv2D(4*feats, (3, 3), activation='relu', padding='same')(up7)
    bn13 = layers.BatchNormalization(axis=3)(conv7)
    conv7 = layers.Conv2D(4*feats, (3, 3), activation='relu', padding='same')(bn13)
    bn14 = layers.BatchNormalization(axis=3)(conv7)

    # Decoder level 4: 256 -> 512, skip connection from conv2.
    up_conv7 = layers.UpSampling2D(size=(2, 2))(bn14)
    up8 = layers.concatenate([up_conv7, conv2], axis=concat_axis)
    conv8 = layers.Conv2D(2*feats, (3, 3), activation='relu', padding='same')(up8)
    bn15 = layers.BatchNormalization(axis=3)(conv8)
    conv8 = layers.Conv2D(2*feats, (3, 3), activation='relu', padding='same')(bn15)
    bn16 = layers.BatchNormalization(axis=3)(conv8)

    # ReLU on the 1x1 head keeps the output non-negative.
    conv10 = layers.Conv2D(1, (1, 1), activation='relu')(bn16)
    #bn19 = BatchNormalization(axis=3)(conv10)

    model = tf.keras.models.Model(inputs=[ref_input,z_input], outputs=conv10)

    return model
コード例 #24
0
# Fragment of a YOLO-style detection neck.  Depends on names defined earlier
# in the file: `layer` (current feature map), `conv` (a conv-block helper),
# `l` (presumably tf.keras.layers — TODO confirm), and backbone taps
# `conv5` / `conv7`.  `route1` / `route2` feed later detection heads.
# NOTE(review): the repeated `route = ''` assignments are dead placeholders,
# likely leftover scaffolding — confirm before removing.
route = ''
route = ''

# SPP-like block.
# NOTE(review): the same 13x13 maxpool tensor is concatenated three times;
# a YOLO SPP block normally pools at several sizes (e.g. 5/9/13) — verify
# this repetition is intentional.
maxpool = l.MaxPool2D(pool_size=(13, 13), strides=1, padding='same')(layer)
layer = tf.concat([layer, maxpool, maxpool, maxpool], axis=-1) #route

layer = conv(layer, 512, 1, 1)
layer = conv(layer, 512, 3, 1)
# Merge with the deeper backbone tap.
layer = tf.concat([layer, conv7], axis=-1) #route

layer = conv(layer, 512, 1, 1)

# First branch point kept for a later detection head.
route1 = layer

# Reduce channels, then upsample to match the shallower backbone tap.
layer = conv(layer, 256, 1, 1)
layer = l.UpSampling2D(size=(2, 2))(layer)

layer = tf.concat([layer, conv5], axis=-1) #route

layer = conv(layer, 256, 1, 1)
# `conv8` is saved so the refined path below can be re-merged with it.
conv8 = conv(layer, 256, 1, 1)
route = ''
layer = conv(layer, 256, 1, 1)
layer = conv(layer, 256, 3, 1)
layer = conv(layer, 256, 1, 1)
layer = conv(layer, 256, 3, 1)
layer = tf.concat([layer, conv8], axis=-1) #route

layer = conv(layer, 256, 1, 1)
# Second branch point for another detection head.
route2 = layer
layer = conv(layer, 128, 1, 1)
コード例 #25
0
ファイル: model.py プロジェクト: remzawi/cs231n-project
def VanillaUnet(num_class = 1, img_shape = (256,256,3)):
    """Build a vanilla U-Net with cropped skip connections.

    Args:
        num_class: Number of output channels of the final 1x1 conv (no
            activation — raw logits).
        img_shape: Input image shape (height, width, channels).

    Returns:
        A Keras ``Model``; the decoder output is zero-padded back to the
        exact input spatial size before the classifier.
    """
    concat_axis = 3

    def _twin_conv(x, filters, first_name=None):
        # Two 3x3 ReLU convolutions; only the very first conv is named.
        x = layers.Conv2D(filters, (3, 3), activation='relu', padding='same',
                          name=first_name)(x)
        return layers.Conv2D(filters, (3, 3), activation='relu',
                             padding='same')(x)

    def _up_merge(x, skip):
        # Upsample 2x, crop the encoder skip to match, then concatenate.
        x = layers.UpSampling2D(size=(2, 2))(x)
        ch, cw = get_crop_shape(skip, x)
        cropped = layers.Cropping2D(cropping=(ch, cw))(skip)
        return layers.concatenate([x, cropped], axis=concat_axis)

    # Input
    inputs = layers.Input(shape=img_shape)

    # Encoder: four pooling stages plus the bottleneck.
    conv1 = _twin_conv(inputs, 32, first_name='conv1_1')
    pool1 = layers.MaxPooling2D(pool_size=(2, 2))(conv1)
    conv2 = _twin_conv(pool1, 64)
    pool2 = layers.MaxPooling2D(pool_size=(2, 2))(conv2)
    conv3 = _twin_conv(pool2, 128)
    pool3 = layers.MaxPooling2D(pool_size=(2, 2))(conv3)
    conv4 = _twin_conv(pool3, 256)
    pool4 = layers.MaxPooling2D(pool_size=(2, 2))(conv4)
    conv5 = _twin_conv(pool4, 512)

    # Decoder: upsample, crop-and-concat the matching encoder stage, conv.
    x = _twin_conv(_up_merge(conv5, conv4), 256)
    x = _twin_conv(_up_merge(x, conv3), 128)
    x = _twin_conv(_up_merge(x, conv2), 64)
    x = _twin_conv(_up_merge(x, conv1), 32)

    # Pad back to the exact input size, then apply the 1x1 classifier.
    ch, cw = get_crop_shape(inputs, x)
    x = layers.ZeroPadding2D(padding=((ch[0], ch[1]), (cw[0], cw[1])))(x)
    logits = layers.Conv2D(num_class, (1, 1))(x)
    model = Model(inputs=inputs, outputs=logits)

    return model
コード例 #26
0
def dbnet(input_size=640, k=50):
    """Build DBNet training and prediction models for scene-text detection.

    Args:
        input_size: Side length of the square ground-truth / mask inputs.
        k: Steepness factor of the differentiable-binarization sigmoid.

    Returns:
        ``(training_model, prediction_model)``: the training model takes all
        five inputs and outputs the in-graph ``db_loss``; the prediction
        model maps an image to the probability map ``p``.

    NOTE(review): the ``print(...)`` calls below look like leftover debug
    output — consider removing or converting to logging.
    """
    image_input = layers.Input(shape=(None, None, 3))
    gt_input = layers.Input(shape=(input_size, input_size))
    mask_input = layers.Input(shape=(input_size, input_size))
    thresh_input = layers.Input(shape=(input_size, input_size))
    thresh_mask_input = layers.Input(shape=(input_size, input_size))
    #backbone = ResNet50(inputs=image_input, include_top=False, freeze_bn=True)
    # ResNet50V2 backbone; stage outputs C2..C5 are selected by layer name,
    # so this is tightly coupled to Keras' ResNet50V2 naming scheme.
    backbone = ResNet50V2(include_top=False, weights='imagenet', input_tensor = image_input)
    #get output layers
    output_names = ['conv2_block3_out','conv3_block4_out','conv4_block6_out','conv5_block3_out']
    [C2, C3, C4, C5] = [x.output for x in backbone.layers if x.name in output_names]
    # 1x1 convs project every backbone stage to a common 256-channel space.
    in2 = layers.Conv2D(256, (1, 1), padding='same', kernel_initializer='he_normal', name='in2')(C2)
    in3 = layers.Conv2D(256, (1, 1), padding='same', kernel_initializer='he_normal', name='in3')(C3)
    in4 = layers.Conv2D(256, (1, 1), padding='same', kernel_initializer='he_normal', name='in4')(C4)
    in5 = layers.Conv2D(256, (1, 1), padding='same', kernel_initializer='he_normal', name='in5')(C5)
    print("IN shapes")
    print(in2.shape)
    print(in3.shape)
    print(in4.shape)
    print(in5.shape)

    # FPN top-down pathway: every P level is brought to 1/4 input resolution.
    # 1 / 32 * 8 = 1 / 4
    P5 = layers.UpSampling2D(size=(8, 8))(
        layers.Conv2D(64, (3, 3), padding='same', kernel_initializer='he_normal')(in5))
    # 1 / 16 * 4 = 1 / 4
    out4 = layers.Add()([in4, layers.UpSampling2D(size=(2, 2))(in5)])
    P4 = layers.UpSampling2D(size=(4, 4))(
        layers.Conv2D(64, (3, 3), padding='same', kernel_initializer='he_normal')(out4))
    # 1 / 8 * 2 = 1 / 4
    out3 = layers.Add()([in3, layers.UpSampling2D(size=(2, 2))(out4)])
    P3 = layers.UpSampling2D(size=(2, 2))(
        layers.Conv2D(64, (3, 3), padding='same', kernel_initializer='he_normal')(out3))
    # 1 / 4
    P2 = layers.Conv2D(64, (3, 3), padding='same', kernel_initializer='he_normal')(
        layers.Add()([in2, layers.UpSampling2D(size=(2, 2))(out3)]))
    # (b, /4, /4, 256)

    print("P shapes")
    print(P2.shape)
    print(P3.shape)
    print(P4.shape)
    print(P5.shape)
    # Fused feature map: 4 x 64 channels concatenated at 1/4 resolution.
    fuse = layers.Concatenate()([P2, P3, P4, P5])

    # probability map: conv + two stride-2 transposed convs restore full
    # input resolution; sigmoid gives per-pixel text probability.
    p = layers.Conv2D(64, (3, 3), padding='same', kernel_initializer='he_normal', use_bias=False)(fuse)
    p = layers.BatchNormalization()(p)
    p = layers.ReLU()(p)
    p = layers.Conv2DTranspose(64, (2, 2), strides=(2, 2), kernel_initializer='he_normal', use_bias=False)(p)
    p = layers.BatchNormalization()(p)
    p = layers.ReLU()(p)
    p = layers.Conv2DTranspose(1, (2, 2), strides=(2, 2), kernel_initializer='he_normal',
                               activation='sigmoid')(p)

    # threshold map: same head architecture as the probability branch.
    t = layers.Conv2D(64, (3, 3), padding='same', kernel_initializer='he_normal', use_bias=False)(fuse)
    t = layers.BatchNormalization()(t)
    t = layers.ReLU()(t)
    t = layers.Conv2DTranspose(64, (2, 2), strides=(2, 2), kernel_initializer='he_normal', use_bias=False)(t)
    t = layers.BatchNormalization()(t)
    t = layers.ReLU()(t)
    t = layers.Conv2DTranspose(1, (2, 2), strides=(2, 2), kernel_initializer='he_normal',
                               activation='sigmoid')(t)

    # approximate binary map: differentiable binarization
    # b_hat = sigmoid(k * (p - t)) with steepness k.
    b_hat = layers.Lambda(lambda x: 1 / (1 + tf.compat.v1.exp(-k * (x[0] - x[1]))))([p, t])
    print("SHAPES ")
    print("C2 " )
    print(C2.shape)
    print(p.shape)

    print(b_hat.shape)
    print(gt_input.shape)
    print(mask_input.shape)
    print(t.shape)
    print(thresh_input.shape)
    print(thresh_mask_input.shape)
    # The combined loss is computed inside the graph by `db_loss` (defined
    # elsewhere in this file), so the training model's output IS the loss.
    loss = tf.compat.v1.keras.layers.Lambda(db_loss, name='db_loss', output_shape = (None, None, None, 1))([p, b_hat, gt_input, mask_input, t, thresh_input, thresh_mask_input])
    print(loss.shape)

    training_model = models.Model(inputs=[image_input, gt_input, mask_input, thresh_input, thresh_mask_input],
                                  outputs=loss)
    print("CONV1")
    print(training_model.get_layer("conv1_conv").input)
    print(training_model.get_layer("conv1_conv").output)
    prediction_model = models.Model(inputs=image_input, outputs=p)
    print(training_model.summary())
    return training_model, prediction_model
コード例 #27
0
def get_unet():
    """Build a single-channel 512x512 encoder/decoder network.

    One conv+BN per level (not the classic double conv).  The decoder stops
    two levels above the input resolution, so the output is a 128x128x1 map
    rectified with ReLU to keep predicted precipitation non-negative.

    Returns:
        A Keras ``Model`` mapping (512, 512, 1) inputs to (128, 128, 1).
    """
    concat_axis = 3
    inputs = layers.Input(shape=(512, 512, 1))

    feats = 8

    def _conv_bn(x, filters, name=None):
        # 3x3 ReLU conv followed by batch-norm.  Returns both tensors
        # because the skip connections tap the pre-normalization conv output.
        conv = layers.Conv2D(filters, (3, 3), activation='relu',
                             padding='same', name=name)(x)
        return conv, BatchNormalization(axis=3)(conv)

    # Encoder: five pooling stages (512 -> 16).
    x = BatchNormalization(axis=3)(inputs)
    conv1, x = _conv_bn(x, feats, name='conv1_1')
    x = layers.MaxPooling2D(pool_size=(2, 2))(x)  # 256
    conv2, x = _conv_bn(x, 2 * feats)
    x = layers.MaxPooling2D(pool_size=(2, 2))(x)  # 128
    conv3, x = _conv_bn(x, 4 * feats)
    x = layers.MaxPooling2D(pool_size=(2, 2))(x)  # 64
    conv4, x = _conv_bn(x, 8 * feats)
    x = layers.MaxPooling2D(pool_size=(2, 2))(x)  # 32
    conv5, x = _conv_bn(x, 16 * feats)
    x = layers.MaxPooling2D(pool_size=(2, 2))(x)  # 16

    # Bottleneck.
    _, x = _conv_bn(x, 32 * feats)

    # Decoder: three upsampling stages with skip connections (16 -> 128).
    x = layers.UpSampling2D(size=(2, 2))(x)  # 32
    x = layers.concatenate([x, conv5], axis=concat_axis)
    _, x = _conv_bn(x, 16 * feats)

    x = layers.UpSampling2D(size=(2, 2))(x)  # 64
    x = layers.concatenate([x, conv4], axis=concat_axis)
    _, x = _conv_bn(x, 8 * feats)

    x = layers.UpSampling2D(size=(2, 2))(x)  # 128
    x = layers.concatenate([x, conv3], axis=concat_axis)
    _, x = _conv_bn(x, 4 * feats)

    # Rectify last convolution layer to constrain output to positive
    # precipitation values.
    outputs = layers.Conv2D(1, (1, 1), activation='relu')(x)

    model = models.Model(inputs=inputs, outputs=outputs)

    return model
コード例 #28
0
def dunet(height=560, width=400, nbr_mask=10, nbr=16, activation='softmax'):
    """Build a U-Net-style segmentation model with an ASPP bottleneck.

    Args:
        height, width: Input image size.
        nbr_mask: Number of output mask channels.
        nbr: Base channel count; each level down doubles it (up to 8x).
        activation: Activation of the final 1x1 conv head.

    Returns:
        A Keras ``Model`` mapping (height, width, 3) images to
        (height, width, nbr_mask) mask stacks.
    """

    def _conv_bn(x, filters):
        # 3x3 ReLU conv + batch-norm: the basic block of every level.
        x = layers.Conv2D(filters, 3, activation='relu', padding='same')(x)
        return layers.BatchNormalization()(x)

    def _aspp_branch(x, filters, rate):
        # Dilated depthwise conv + pointwise projection, each batch-normed.
        # rate=1 is the plain (undilated) branch.
        y = layers.DepthwiseConv2D(3,
                                   dilation_rate=(rate, rate),
                                   activation='relu',
                                   padding='same')(x)
        y = layers.BatchNormalization()(y)
        y = layers.Conv2D(filters, 1, activation='relu', padding='same')(y)
        return layers.BatchNormalization()(y)

    def _up_merge(x, skip, filters):
        # Learned 2x upsampling, then merge with the encoder feature map.
        x = layers.Conv2DTranspose(filters,
                                   2,
                                   strides=(2, 2),
                                   activation='relu',
                                   padding='same')(x)
        return tf.concat([x, skip], axis=3)

    # Input image
    entree = layers.Input(shape=(height, width, 3), dtype='float32')

    # Encoder: levels 0, -1, -2 (double conv each), then a single conv at -3.
    result1 = _conv_bn(_conv_bn(entree, nbr), nbr)
    x = layers.MaxPool2D()(result1)
    result2 = _conv_bn(_conv_bn(x, 2 * nbr), 2 * nbr)
    x = layers.MaxPool2D()(result2)
    result3 = _conv_bn(_conv_bn(x, 4 * nbr), 4 * nbr)
    x = layers.MaxPool2D()(result3)
    x = _conv_bn(x, 8 * nbr)

    # ASPP: parallel dilated branches plus a pooled image-level branch.
    b0 = _aspp_branch(x, 8 * nbr, 1)
    b1 = _aspp_branch(x, 8 * nbr, 6)
    b2 = _aspp_branch(x, 8 * nbr, 12)
    b3 = _aspp_branch(x, 8 * nbr, 18)
    b4 = layers.AveragePooling2D()(x)
    b4 = layers.Conv2D(8 * nbr, 1, activation='relu', padding='same')(b4)
    b4 = layers.BatchNormalization()(b4)
    b4 = layers.UpSampling2D(interpolation='bilinear')(b4)

    x = layers.Concatenate()([b4, b0, b1, b2, b3])
    x = layers.Conv2D(8 * nbr, 1, activation='relu', padding='same')(x)

    # Decoder: three transposed-conv stages with skip connections.
    x = _up_merge(x, result3, 4 * nbr)
    x = _conv_bn(_conv_bn(x, 4 * nbr), 4 * nbr)
    x = _up_merge(x, result2, 2 * nbr)
    x = _conv_bn(_conv_bn(x, 2 * nbr), 2 * nbr)
    x = _up_merge(x, result1, nbr)
    x = _conv_bn(_conv_bn(x, nbr), nbr)

    # Output head
    sortie = layers.Conv2D(nbr_mask, 1, activation=activation,
                           padding='same')(x)

    model = models.Model(inputs=entree, outputs=sortie)
    return model
コード例 #29
0
def build_BiFPN(features, num_channels, id, freeze_bn=False):
    if id == 0:
        _, _, C3, C4, C5 = features
        P3_in = C3
        P4_in = C4
        P5_in = C5
        P6_in = layers.Conv2D(num_channels,
                              kernel_size=1,
                              padding='same',
                              name='resample_p6/conv2d')(C5)
        P6_in = layers.BatchNormalization(momentum=MOMENTUM,
                                          epsilon=EPSILON,
                                          name='resample_p6/bn')(P6_in)
        # P6_in = BatchNormalization(freeze=freeze_bn, name='resample_p6/bn')(P6_in)
        P6_in = layers.MaxPooling2D(pool_size=3,
                                    strides=2,
                                    padding='same',
                                    name='resample_p6/maxpool')(P6_in)
        P7_in = layers.MaxPooling2D(pool_size=3,
                                    strides=2,
                                    padding='same',
                                    name='resample_p7/maxpool')(P6_in)
        P7_U = layers.UpSampling2D()(P7_in)
        P6_td = layers.Add(name=f'fpn_cells/cell_{id}/fnode0/add')(
            [P6_in, P7_U])
        P6_td = layers.Activation(lambda x: tf.nn.swish(x))(P6_td)
        P6_td = SeparableConvBlock(
            num_channels=num_channels,
            kernel_size=3,
            strides=1,
            name=f'fpn_cells/cell_{id}/fnode0/op_after_combine5')(P6_td)
        P5_in_1 = layers.Conv2D(
            num_channels,
            kernel_size=1,
            padding='same',
            name=f'fpn_cells/cell_{id}/fnode1/resample_0_2_6/conv2d')(P5_in)
        P5_in_1 = layers.BatchNormalization(
            momentum=MOMENTUM,
            epsilon=EPSILON,
            name=f'fpn_cells/cell_{id}/fnode1/resample_0_2_6/bn')(P5_in_1)
        # P5_in_1 = BatchNormalization(freeze=freeze_bn, name=f'fpn_cells/cell_{id}/fnode1/resample_0_2_6/bn')(P5_in_1)
        P6_U = layers.UpSampling2D()(P6_td)
        P5_td = layers.Add(name=f'fpn_cells/cell_{id}/fnode1/add')(
            [P5_in_1, P6_U])
        P5_td = layers.Activation(lambda x: tf.nn.swish(x))(P5_td)
        P5_td = SeparableConvBlock(
            num_channels=num_channels,
            kernel_size=3,
            strides=1,
            name=f'fpn_cells/cell_{id}/fnode1/op_after_combine6')(P5_td)
        P4_in_1 = layers.Conv2D(
            num_channels,
            kernel_size=1,
            padding='same',
            name=f'fpn_cells/cell_{id}/fnode2/resample_0_1_7/conv2d')(P4_in)
        P4_in_1 = layers.BatchNormalization(
            momentum=MOMENTUM,
            epsilon=EPSILON,
            name=f'fpn_cells/cell_{id}/fnode2/resample_0_1_7/bn')(P4_in_1)
        # P4_in_1 = BatchNormalization(freeze=freeze_bn, name=f'fpn_cells/cell_{id}/fnode2/resample_0_1_7/bn')(P4_in_1)
        P5_U = layers.UpSampling2D()(P5_td)
        P4_td = layers.Add(name=f'fpn_cells/cell_{id}/fnode2/add')(
            [P4_in_1, P5_U])
        P4_td = layers.Activation(lambda x: tf.nn.swish(x))(P4_td)
        P4_td = SeparableConvBlock(
            num_channels=num_channels,
            kernel_size=3,
            strides=1,
            name=f'fpn_cells/cell_{id}/fnode2/op_after_combine7')(P4_td)
        P3_in = layers.Conv2D(
            num_channels,
            kernel_size=1,
            padding='same',
            name=f'fpn_cells/cell_{id}/fnode3/resample_0_0_8/conv2d')(P3_in)
        P3_in = layers.BatchNormalization(
            momentum=MOMENTUM,
            epsilon=EPSILON,
            name=f'fpn_cells/cell_{id}/fnode3/resample_0_0_8/bn')(P3_in)
        # P3_in = BatchNormalization(freeze=freeze_bn, name=f'fpn_cells/cell_{id}/fnode3/resample_0_0_8/bn')(P3_in)
        P4_U = layers.UpSampling2D()(P4_td)
        P3_out = layers.Add(name=f'fpn_cells/cell_{id}/fnode3/add')(
            [P3_in, P4_U])
        P3_out = layers.Activation(lambda x: tf.nn.swish(x))(P3_out)
        P3_out = SeparableConvBlock(
            num_channels=num_channels,
            kernel_size=3,
            strides=1,
            name=f'fpn_cells/cell_{id}/fnode3/op_after_combine8')(P3_out)
        P4_in_2 = layers.Conv2D(
            num_channels,
            kernel_size=1,
            padding='same',
            name=f'fpn_cells/cell_{id}/fnode4/resample_0_1_9/conv2d')(P4_in)
        P4_in_2 = layers.BatchNormalization(
            momentum=MOMENTUM,
            epsilon=EPSILON,
            name=f'fpn_cells/cell_{id}/fnode4/resample_0_1_9/bn')(P4_in_2)
        # P4_in_2 = BatchNormalization(freeze=freeze_bn, name=f'fpn_cells/cell_{id}/fnode4/resample_0_1_9/bn')(P4_in_2)
        P3_D = layers.MaxPooling2D(pool_size=3, strides=2,
                                   padding='same')(P3_out)
        P4_out = layers.Add(name=f'fpn_cells/cell_{id}/fnode4/add')(
            [P4_in_2, P4_td, P3_D])
        P4_out = layers.Activation(lambda x: tf.nn.swish(x))(P4_out)
        P4_out = SeparableConvBlock(
            num_channels=num_channels,
            kernel_size=3,
            strides=1,
            name=f'fpn_cells/cell_{id}/fnode4/op_after_combine9')(P4_out)

        P5_in_2 = layers.Conv2D(
            num_channels,
            kernel_size=1,
            padding='same',
            name=f'fpn_cells/cell_{id}/fnode5/resample_0_2_10/conv2d')(P5_in)
        P5_in_2 = layers.BatchNormalization(
            momentum=MOMENTUM,
            epsilon=EPSILON,
            name=f'fpn_cells/cell_{id}/fnode5/resample_0_2_10/bn')(P5_in_2)
        # P5_in_2 = BatchNormalization(freeze=freeze_bn, name=f'fpn_cells/cell_{id}/fnode5/resample_0_2_10/bn')(P5_in_2)
        P4_D = layers.MaxPooling2D(pool_size=3, strides=2,
                                   padding='same')(P4_out)
        P5_out = layers.Add(name=f'fpn_cells/cell_{id}/fnode5/add')(
            [P5_in_2, P5_td, P4_D])
        P5_out = layers.Activation(lambda x: tf.nn.swish(x))(P5_out)
        P5_out = SeparableConvBlock(
            num_channels=num_channels,
            kernel_size=3,
            strides=1,
            name=f'fpn_cells/cell_{id}/fnode5/op_after_combine10')(P5_out)

        P5_D = layers.MaxPooling2D(pool_size=3, strides=2,
                                   padding='same')(P5_out)
        P6_out = layers.Add(name=f'fpn_cells/cell_{id}/fnode6/add')(
            [P6_in, P6_td, P5_D])
        P6_out = layers.Activation(lambda x: tf.nn.swish(x))(P6_out)
        P6_out = SeparableConvBlock(
            num_channels=num_channels,
            kernel_size=3,
            strides=1,
            name=f'fpn_cells/cell_{id}/fnode6/op_after_combine11')(P6_out)

        P6_D = layers.MaxPooling2D(pool_size=3, strides=2,
                                   padding='same')(P6_out)
        P7_out = layers.Add(name=f'fpn_cells/cell_{id}/fnode7/add')(
            [P7_in, P6_D])
        P7_out = layers.Activation(lambda x: tf.nn.swish(x))(P7_out)
        P7_out = SeparableConvBlock(
            num_channels=num_channels,
            kernel_size=3,
            strides=1,
            name=f'fpn_cells/cell_{id}/fnode7/op_after_combine12')(P7_out)

    else:
        P3_in, P4_in, P5_in, P6_in, P7_in = features
        P7_U = layers.UpSampling2D()(P7_in)
        P6_td = layers.Add(name=f'fpn_cells/cell_{id}/fnode0/add')(
            [P6_in, P7_U])
        P6_td = layers.Activation(lambda x: tf.nn.swish(x))(P6_td)
        P6_td = SeparableConvBlock(
            num_channels=num_channels,
            kernel_size=3,
            strides=1,
            name=f'fpn_cells/cell_{id}/fnode0/op_after_combine5')(P6_td)
        P6_U = layers.UpSampling2D()(P6_td)
        P5_td = layers.Add(name=f'fpn_cells/cell_{id}/fnode1/add')(
            [P5_in, P6_U])
        P5_td = layers.Activation(lambda x: tf.nn.swish(x))(P5_td)
        P5_td = SeparableConvBlock(
            num_channels=num_channels,
            kernel_size=3,
            strides=1,
            name=f'fpn_cells/cell_{id}/fnode1/op_after_combine6')(P5_td)
        P5_U = layers.UpSampling2D()(P5_td)
        P4_td = layers.Add(name=f'fpn_cells/cell_{id}/fnode2/add')(
            [P4_in, P5_U])
        P4_td = layers.Activation(lambda x: tf.nn.swish(x))(P4_td)
        P4_td = SeparableConvBlock(
            num_channels=num_channels,
            kernel_size=3,
            strides=1,
            name=f'fpn_cells/cell_{id}/fnode2/op_after_combine7')(P4_td)
        P4_U = layers.UpSampling2D()(P4_td)
        P3_out = layers.Add(name=f'fpn_cells/cell_{id}/fnode3/add')(
            [P3_in, P4_U])
        P3_out = layers.Activation(lambda x: tf.nn.swish(x))(P3_out)
        P3_out = SeparableConvBlock(
            num_channels=num_channels,
            kernel_size=3,
            strides=1,
            name=f'fpn_cells/cell_{id}/fnode3/op_after_combine8')(P3_out)
        P3_D = layers.MaxPooling2D(pool_size=3, strides=2,
                                   padding='same')(P3_out)
        P4_out = layers.Add(name=f'fpn_cells/cell_{id}/fnode4/add')(
            [P4_in, P4_td, P3_D])
        P4_out = layers.Activation(lambda x: tf.nn.swish(x))(P4_out)
        P4_out = SeparableConvBlock(
            num_channels=num_channels,
            kernel_size=3,
            strides=1,
            name=f'fpn_cells/cell_{id}/fnode4/op_after_combine9')(P4_out)

        P4_D = layers.MaxPooling2D(pool_size=3, strides=2,
                                   padding='same')(P4_out)
        P5_out = layers.Add(name=f'fpn_cells/cell_{id}/fnode5/add')(
            [P5_in, P5_td, P4_D])
        P5_out = layers.Activation(lambda x: tf.nn.swish(x))(P5_out)
        P5_out = SeparableConvBlock(
            num_channels=num_channels,
            kernel_size=3,
            strides=1,
            name=f'fpn_cells/cell_{id}/fnode5/op_after_combine10')(P5_out)

        P5_D = layers.MaxPooling2D(pool_size=3, strides=2,
                                   padding='same')(P5_out)
        P6_out = layers.Add(name=f'fpn_cells/cell_{id}/fnode6/add')(
            [P6_in, P6_td, P5_D])
        P6_out = layers.Activation(lambda x: tf.nn.swish(x))(P6_out)
        P6_out = SeparableConvBlock(
            num_channels=num_channels,
            kernel_size=3,
            strides=1,
            name=f'fpn_cells/cell_{id}/fnode6/op_after_combine11')(P6_out)

        P6_D = layers.MaxPooling2D(pool_size=3, strides=2,
                                   padding='same')(P6_out)
        P7_out = layers.Add(name=f'fpn_cells/cell_{id}/fnode7/add')(
            [P7_in, P6_D])
        P7_out = layers.Activation(lambda x: tf.nn.swish(x))(P7_out)
        P7_out = SeparableConvBlock(
            num_channels=num_channels,
            kernel_size=3,
            strides=1,
            name=f'fpn_cells/cell_{id}/fnode7/op_after_combine12')(P7_out)
    return P3_out, P4_td, P5_td, P6_td, P7_out
# Code example #30
# 0
def upconv_concat(net, tensor_concat):
    """Upsample `net` 2x spatially and concatenate it with `tensor_concat`.

    Standard U-Net-style decoder step: nearest-neighbor upsampling followed by
    a channel-axis merge with the matching encoder feature map.

    Args:
        net: Feature tensor to be upsampled (assumed NHWC — channels last).
        tensor_concat: Skip-connection tensor whose spatial size matches the
            upsampled `net`.

    Returns:
        The channel-wise concatenation of the upsampled `net` and
        `tensor_concat`.
    """
    upsampled = layers.UpSampling2D(size=(2, 2))(net)
    merged = layers.Concatenate(axis=-1)([upsampled, tensor_concat])
    return merged