def build_net(self,
              conv_window=(6, 3),
              pooling_window=(10, 1),
              n_filters=(64, 32, 16)):
    """Build a 3-stage convolutional autoencoder and its encoder sub-model.

    Sets ``self.autoencoder`` (compiled with Adam / MSE) and
    ``self.encoder_model`` (input -> bottleneck tensor).

    Args:
        conv_window: 2-D kernel size used by every Conv2D layer.
        pooling_window: pool/upsample window used at every stage.
        n_filters: filter counts of the three encoder stages; the decoder
            mirrors them in reverse order.

    NOTE(review): with 'same' padding the decoder reproduces the input
    spatial size exactly only when each spatial dim of self.dims is
    divisible by the matching pooling_window entry -- confirm for the data.
    """
    # Shape comes from the data dims; adapt this if using the
    # `channels_first` image data format.
    input_img = Input(shape=self.dims[1:])
    print("shape of input", K.int_shape(input_img))

    # --- encoder ---
    conv_1 = Conv2D(n_filters[0], conv_window, activation='relu',
                    padding='same')(input_img)
    print("shape after first conv", K.int_shape(conv_1))
    pool_1 = MaxPooling2D(pooling_window, padding='same')(conv_1)
    print("shape after first pooling", K.int_shape(pool_1))
    conv_2 = Conv2D(n_filters[1], conv_window, activation='relu',
                    padding='same')(pool_1)
    print("shape after second conv", K.int_shape(conv_2))
    pool_2 = MaxPooling2D(pooling_window, padding='same')(conv_2)
    print("shape after second pooling", K.int_shape(pool_2))
    conv_3 = Conv2D(n_filters[2], conv_window, activation='relu',
                    padding='same')(pool_2)
    print("shape after third conv", K.int_shape(conv_3))
    encoded = MaxPooling2D(pooling_window, padding='same')(conv_3)
    print("shape of encoded", K.int_shape(encoded))

    # --- decoder (mirror of the encoder) ---
    up_3 = UpSampling2D(pooling_window)(encoded)
    print("shape after upsample third pooling", K.int_shape(up_3))
    conv_neg_3 = Conv2D(n_filters[2], conv_window, activation='relu',
                        padding='same')(up_3)
    print("shape after decode third conv", K.int_shape(conv_neg_3))
    up_2 = UpSampling2D(pooling_window)(conv_neg_3)
    print("shape after upsample second pooling", K.int_shape(up_2))
    conv_neg_2 = Conv2D(n_filters[1], conv_window, activation='relu',
                        padding='same')(up_2)
    print("shape after decode second conv", K.int_shape(conv_neg_2))
    up_1 = UpSampling2D(pooling_window)(conv_neg_2)
    print("shape after upsample first pooling", K.int_shape(up_1))
    # FIX: this layer was previously also named conv_neg_3, silently
    # shadowing the third decoder conv above.
    conv_neg_1 = Conv2D(n_filters[0], conv_window, activation='relu',
                        padding='same')(up_1)
    print("shape after decode first conv", K.int_shape(conv_neg_1))
    # Linear single-channel reconstruction of the input.
    decoded = Conv2D(1, conv_window, activation='linear',
                     padding='same')(conv_neg_1)
    print("shape after decode to input", K.int_shape(decoded))

    self.autoencoder = Model(input_img, decoded)
    self.autoencoder.compile(optimizer='adam', loss='mean_squared_error')
    # FIX: build the encoder from the bottleneck tensor directly instead
    # of the brittle magic index `self.autoencoder.layers[6].output`.
    self.encoder_model = Model(input_img, encoded)
def create(input_shape, activation=tf.nn.relu, num_class=1):
    """Build a U-Net with an evidential regression head.

    Four-level encoder/decoder with crop-and-concatenate skip
    connections; the last layer is Conv2DNormalGamma (presumably an
    evidential deep-learning head emitting NormalInverseGamma
    parameters -- confirm against its library).

    Returns:
        (model, opts): the tf.keras Model and a dict snapshot of the
        call arguments.
    """
    opts = locals().copy()  # must stay first: snapshot only the arguments

    concat_axis = 3  # channels-last feature axis
    inputs = tf.keras.layers.Input(shape=input_shape)

    conv = functools.partial(Conv2D, activation=activation, padding='same')

    # --- encoder: two convs per level, 2x2 pooling between levels ---
    x = conv(32, (3, 3), name='conv1_1')(inputs)
    x = conv(32, (3, 3))(x)
    skips = [x]
    for width in (64, 128, 256):
        x = MaxPooling2D(pool_size=(2, 2))(x)
        x = conv(width, (3, 3))(x)
        x = conv(width, (3, 3))(x)
        skips.append(x)

    # --- bottleneck ---
    x = MaxPooling2D(pool_size=(2, 2))(x)
    x = conv(512, (3, 3))(x)
    x = conv(512, (3, 3))(x)

    # --- decoder: upsample, crop the skip to match, concat, two convs ---
    for width, skip in zip((256, 128, 64, 32), reversed(skips)):
        x = UpSampling2D(size=(2, 2))(x)
        ch, cw = get_crop_shape(skip, x)
        trimmed = Cropping2D(cropping=(ch, cw))(skip)
        x = concatenate([x, trimmed], axis=concat_axis)
        x = conv(width, (3, 3))(x)
        x = conv(width, (3, 3))(x)

    # Pad back to the exact input spatial size, then the evidential head.
    ch, cw = get_crop_shape(inputs, x)
    x = ZeroPadding2D(padding=((ch[0], ch[1]), (cw[0], cw[1])))(x)
    x = conv(4 * num_class, (1, 1))(x)
    evidential_output = Conv2DNormalGamma(num_class, (1, 1))(x)

    model = tf.keras.models.Model(inputs=inputs, outputs=evidential_output)
    return model, opts
Exemplo n.º 3
0
def make_yolov3_model():
    """Build the YOLOv3 network: Darknet-53 style backbone plus three
    detection heads, returned as one Keras Model with three outputs."""

    def spec(filt, kernel, idx, stride=1, bnorm=True, leaky=True):
        # One conv-layer spec dict in the format _conv_block expects
        # (key order matches the original hand-written literals).
        return {'filter': filt, 'kernel': kernel, 'stride': stride,
                'bnorm': bnorm, 'leaky': leaky, 'layer_idx': idx}

    input_image = Input(shape=(None, None, 3))
    # Layer  0 => 4
    x = _conv_block(input_image, [spec(32, 3, 0),
                                  spec(64, 3, 1, stride=2),
                                  spec(32, 1, 2),
                                  spec(64, 3, 3)])
    # Layer  5 => 8
    x = _conv_block(x, [spec(128, 3, 5, stride=2),
                        spec(64, 1, 6),
                        spec(128, 3, 7)])
    # Layer  9 => 11
    x = _conv_block(x, [spec(64, 1, 9), spec(128, 3, 10)])
    # Layer 12 => 15
    x = _conv_block(x, [spec(256, 3, 12, stride=2),
                        spec(128, 1, 13),
                        spec(256, 3, 14)])
    # Layer 16 => 36: seven 128/256 conv pairs
    for i in range(7):
        x = _conv_block(x, [spec(128, 1, 16 + i * 3),
                            spec(256, 3, 17 + i * 3)])
    skip_36 = x
    # Layer 37 => 40
    x = _conv_block(x, [spec(512, 3, 37, stride=2),
                        spec(256, 1, 38),
                        spec(512, 3, 39)])
    # Layer 41 => 61: seven 256/512 conv pairs
    for i in range(7):
        x = _conv_block(x, [spec(256, 1, 41 + i * 3),
                            spec(512, 3, 42 + i * 3)])
    skip_61 = x
    # Layer 62 => 65
    x = _conv_block(x, [spec(1024, 3, 62, stride=2),
                        spec(512, 1, 63),
                        spec(1024, 3, 64)])
    # Layer 66 => 74: three 512/1024 conv pairs
    for i in range(3):
        x = _conv_block(x, [spec(512, 1, 66 + i * 3),
                            spec(1024, 3, 67 + i * 3)])
    # Layer 75 => 79
    x = _conv_block(x, [spec(512, 1, 75), spec(1024, 3, 76),
                        spec(512, 1, 77), spec(1024, 3, 78),
                        spec(512, 1, 79)], skip=False)
    # Layer 80 => 82: first detection head
    yolo_82 = _conv_block(x, [spec(1024, 3, 80),
                              spec(255, 1, 81, bnorm=False, leaky=False)],
                          skip=False)
    # Layer 83 => 86: upsample and merge with the layer-61 branch
    x = _conv_block(x, [spec(256, 1, 84)], skip=False)
    x = UpSampling2D(2)(x)
    x = concatenate([x, skip_61])
    # Layer 87 => 91
    x = _conv_block(x, [spec(256, 1, 87), spec(512, 3, 88),
                        spec(256, 1, 89), spec(512, 3, 90),
                        spec(256, 1, 91)], skip=False)
    # Layer 92 => 94: second detection head
    yolo_94 = _conv_block(x, [spec(512, 3, 92),
                              spec(255, 1, 93, bnorm=False, leaky=False)],
                          skip=False)
    # Layer 95 => 98: upsample and merge with the layer-36 branch
    x = _conv_block(x, [spec(128, 1, 96)], skip=False)
    x = UpSampling2D(2)(x)
    x = concatenate([x, skip_36])
    # Layer 99 => 106: third detection head
    yolo_106 = _conv_block(x, [spec(128, 1, 99), spec(256, 3, 100),
                               spec(128, 1, 101), spec(256, 3, 102),
                               spec(128, 1, 103), spec(256, 3, 104),
                               spec(255, 1, 105, bnorm=False, leaky=False)],
                           skip=False)

    return Model(input_image, [yolo_82, yolo_94, yolo_106])
def SegNet(img_w=512, img_h=512, n_label=6):
    """Build a SegNet-style Sequential segmentation network.

    Encoder: VGG-like Conv/BatchNorm stacks with 2x2 max-pooling (5 times),
    so img_w and img_h should each be divisible by 32.
    Decoder: mirrored Conv/BatchNorm stacks with 2x2 upsampling.
    Output: per-pixel softmax over n_label classes, reshaped to
    (img_w * img_h, n_label).

    Args:
        img_w: input image width.
        img_h: input image height.
        n_label: number of segmentation classes.

    Returns:
        The (uncompiled) tf.keras Sequential model.
    """

    def add_conv_bn(model, filters, reps):
        # Append `reps` [Conv2D(3x3, same, relu) + BatchNorm] pairs.
        for _ in range(reps):
            model.add(Conv2D(filters, (3, 3), strides=(1, 1),
                             padding='same', activation='relu'))
            model.add(BatchNormalization())

    model = Sequential()
    # --- encoder ---
    # First layer carries the input shape; all later convs infer theirs.
    model.add(
        Conv2D(64, (3, 3),
               strides=(1, 1),
               input_shape=(img_w, img_h, 3),
               padding='same',
               activation='relu',
               data_format='channels_last'))
    model.add(BatchNormalization())
    add_conv_bn(model, 64, 1)
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # (img/2, img/2)
    add_conv_bn(model, 128, 2)
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # (img/4, img/4)
    add_conv_bn(model, 256, 3)
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # (img/8, img/8)
    add_conv_bn(model, 512, 3)
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # (img/16, img/16)
    add_conv_bn(model, 512, 3)
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # (img/32, img/32)

    # --- decoder ---
    model.add(UpSampling2D(size=(2, 2)))
    add_conv_bn(model, 512, 3)
    model.add(UpSampling2D(size=(2, 2)))
    add_conv_bn(model, 512, 3)
    model.add(UpSampling2D(size=(2, 2)))
    add_conv_bn(model, 256, 3)
    model.add(UpSampling2D(size=(2, 2)))
    add_conv_bn(model, 128, 2)
    model.add(UpSampling2D(size=(2, 2)))
    # FIX: the original decoder 64-filter conv copy-pasted the encoder's
    # `input_shape=(img_w, img_h, 3)` / `data_format` kwargs; input_shape
    # is ignored on a non-first layer and was only misleading.
    add_conv_bn(model, 64, 2)
    # 1x1 conv to class scores, flatten spatial dims for the softmax.
    model.add(Conv2D(n_label, (1, 1), strides=(1, 1), padding='same'))
    model.add(Reshape((img_w * img_h, n_label)))
    # Swapping axes 1 and 2, equivalent to np.swapaxes(layer, 1, 2)
    # model.add(Permute((2,1)))
    model.add(Activation('softmax'))
    return model
Exemplo n.º 5
0
def unet(pretrained_weights=None,
         shape=(256, 256, 1),
         filters=64,
         optimizer=None,
         loss='binary_crossentropy',
         metrics=None):
    """Build and compile a standard U-Net for binary segmentation.

    Args:
        pretrained_weights: optional path of weights to load after compile.
        shape: input tensor shape (H, W, C).
        filters: base filter count; deeper levels use multiples of it.
        optimizer: Keras optimizer; defaults to a fresh Adam(lr=1e-4).
        loss: loss passed to compile().
        metrics: metrics list passed to compile(); defaults to ['accuracy'].

    Returns:
        The compiled tf.keras Model (sigmoid single-channel output).
    """
    # FIX: the defaults used to be `optimizer=Adam(lr=1e-4)` and
    # `metrics=['accuracy']` -- a single optimizer instance built at import
    # time and shared by every call (optimizer state is per-model), and a
    # shared mutable list. Build fresh defaults per call instead.
    if optimizer is None:
        optimizer = Adam(lr=1e-4)
    if metrics is None:
        metrics = ['accuracy']

    def double_conv(x, n, prefix):
        # Two 3x3 relu convs named {prefix}_1 / {prefix}_2 (names kept
        # identical to the original so by-name weight loading still works).
        for suffix in ('_1', '_2'):
            x = Conv2D(filters=n,
                       kernel_size=3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal',
                       name=prefix + suffix)(x)
        return x

    def up_concat(x, skip, n, level):
        # Upsample, 2x2 "up-conv", then concatenate with the encoder skip.
        up = Conv2D(filters=n,
                    kernel_size=2,
                    activation='relu',
                    padding='same',
                    kernel_initializer='he_normal',
                    name='conv%d_concat' % level)(
                        UpSampling2D(size=2, name='up%d_concat' % level)(x))
        return concatenate([skip, up], axis=3, name='concat%d_0' % level)

    # --- contracting path ---
    inputs = Input(shape=shape, name='input')
    conv1 = double_conv(inputs, filters, 'conv1')
    pool1 = MaxPooling2D(pool_size=2, name='pool2_0')(conv1)
    conv2 = double_conv(pool1, filters * 2, 'conv2')
    pool2 = MaxPooling2D(pool_size=2, name='pool3_0')(conv2)
    conv3 = double_conv(pool2, filters * 4, 'conv3')
    pool3 = MaxPooling2D(pool_size=2, name='pool4_0')(conv3)
    conv4 = double_conv(pool3, filters * 8, 'conv4')
    drop4 = Dropout(rate=0.5, name='drop5_0')(conv4)

    # --- bottleneck ---
    pool4 = MaxPooling2D(pool_size=2, name='pool5_0')(drop4)
    conv5 = double_conv(pool4, filters * 16, 'conv5')
    drop5 = Dropout(rate=0.5, name='drop6_0')(conv5)

    # --- expanding path (skips: drop4, conv3, conv2, conv1) ---
    merge6 = up_concat(drop5, drop4, filters * 8, 7)
    conv6 = double_conv(merge6, filters * 8, 'conv7')
    merge7 = up_concat(conv6, conv3, filters * 4, 8)
    conv7 = double_conv(merge7, filters * 4, 'conv8')
    merge8 = up_concat(conv7, conv2, filters * 2, 9)
    conv8 = double_conv(merge8, filters * 2, 'conv9')
    merge9 = up_concat(conv8, conv1, filters, 10)
    conv9 = double_conv(merge9, filters, 'conv10')
    conv9 = Conv2D(filters=2,
                   kernel_size=3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal',
                   name='conv10_3')(conv9)

    # Single-channel sigmoid output for binary segmentation.
    conv10 = Conv2D(filters=1,
                    kernel_size=1,
                    activation='sigmoid',
                    name='conv11_0')(conv9)

    model = Model(inputs, conv10, name="unet")
    model.compile(optimizer=optimizer, loss=loss, metrics=metrics)

    if (pretrained_weights):
        model.load_weights(pretrained_weights)

    return model
Exemplo n.º 6
0
    def load_architecture(self):
        """Build the YOLOv3 graph: backbone plus three detection heads.

        Returns:
            tf.keras.models.Model mapping an RGB image of any size to the
            three YOLO output tensors.
        """

        def spec(filt, kernel, idx, stride=1, bnorm=True, leaky=True):
            # One conv-layer spec dict in the format ConvBlock expects
            # (key order matches the original hand-written literals).
            return {'filter': filt, 'kernel': kernel, 'stride': stride,
                    'bnorm': bnorm, 'leaky': leaky, 'layer_idx': idx}

        inp_image = Input(shape=[None, None, 3])

        # Layers 0-4.
        x = ConvBlock.get_conv_block(inp_image, [spec(32, 3, 0),
                                                 spec(64, 3, 1, stride=2),
                                                 spec(32, 1, 2),
                                                 spec(64, 3, 3)])
        # Layers 5-8.
        x = ConvBlock.get_conv_block(x, [spec(128, 3, 5, stride=2),
                                         spec(64, 1, 6),
                                         spec(128, 3, 7)])
        # Layers 9-11.
        x = ConvBlock.get_conv_block(x, [spec(64, 1, 9), spec(128, 3, 10)])
        # Layers 12-15.
        x = ConvBlock.get_conv_block(x, [spec(256, 3, 12, stride=2),
                                         spec(128, 1, 13),
                                         spec(256, 3, 14)])
        # Layers 16-36: seven 128/256 conv pairs.
        for i in range(7):
            x = ConvBlock.get_conv_block(x, [spec(128, 1, 16 + i * 3),
                                             spec(256, 3, 17 + i * 3)])
        skip_36 = x
        # Layers 37-40.
        x = ConvBlock.get_conv_block(x, [spec(512, 3, 37, stride=2),
                                         spec(256, 1, 38),
                                         spec(512, 3, 39)])
        # Layers 41-61: seven 256/512 conv pairs.
        for i in range(7):
            x = ConvBlock.get_conv_block(x, [spec(256, 1, 41 + i * 3),
                                             spec(512, 3, 42 + i * 3)])
        skip_61 = x
        # Layers 62-65.
        x = ConvBlock.get_conv_block(x, [spec(1024, 3, 62, stride=2),
                                         spec(512, 1, 63),
                                         spec(1024, 3, 64)])
        # Layers 66-74: three 512/1024 conv pairs.
        for i in range(3):
            x = ConvBlock.get_conv_block(x, [spec(512, 1, 66 + i * 3),
                                             spec(1024, 3, 67 + i * 3)])
        # Layers 75-79.
        x = ConvBlock.get_conv_block(x, [spec(512, 1, 75), spec(1024, 3, 76),
                                         spec(512, 1, 77), spec(1024, 3, 78),
                                         spec(512, 1, 79)], skip=False)
        # Layers 80-82: first detection head.
        yolo_82 = ConvBlock.get_conv_block(
            x, [spec(1024, 3, 80),
                spec(255, 1, 81, bnorm=False, leaky=False)], skip=False)
        # Layers 83-86: upsample and merge with the layer-61 branch.
        x = ConvBlock.get_conv_block(x, [spec(256, 1, 84)], skip=False)
        x = UpSampling2D(2)(x)
        x = concatenate([x, skip_61])
        # Layers 87-91.
        x = ConvBlock.get_conv_block(x, [spec(256, 1, 87), spec(512, 3, 88),
                                         spec(256, 1, 89), spec(512, 3, 90),
                                         spec(256, 1, 91)], skip=False)
        # Layers 92-94: second detection head.
        yolo_94 = ConvBlock.get_conv_block(
            x, [spec(512, 3, 92),
                spec(255, 1, 93, bnorm=False, leaky=False)], skip=False)
        # Layers 95-98: upsample and merge with the layer-36 branch.
        x = ConvBlock.get_conv_block(x, [spec(128, 1, 96)], skip=False)
        x = UpSampling2D(2)(x)
        x = concatenate([x, skip_36])
        # Layers 99-106: third detection head.
        yolo_106 = ConvBlock.get_conv_block(
            x, [spec(128, 1, 99), spec(256, 3, 100), spec(128, 1, 101),
                spec(256, 3, 102), spec(128, 1, 103), spec(256, 3, 104),
                spec(255, 1, 105, bnorm=False, leaky=False)], skip=False)

        return Model(inp_image, [yolo_82, yolo_94, yolo_106])
Exemplo n.º 7
0
def SegNet():
    """Build and compile a SegNet-style encoder-decoder for segmentation.

    Relies on module-level globals ``img_w``, ``img_h`` (input spatial size)
    and ``n_label`` (number of segmentation classes).

    Returns:
        A compiled ``Sequential`` model mapping ``(img_w, img_h, 3)`` images
        to per-pixel softmax class scores, with SGD + categorical
        cross-entropy.
    """
    model = Sequential()

    def _conv_bn(filters, **conv_kwargs):
        # Shared building block: 3x3 'same' conv (stride 1, ReLU) + batch norm.
        model.add(
            Conv2D(filters, (3, 3),
                   strides=(1, 1),
                   padding='same',
                   activation='relu',
                   **conv_kwargs))
        model.add(BatchNormalization())

    # encoder: VGG-like stages, each followed by 2x2 max pooling
    # (spatial size halves per stage: e.g. 256 -> 128 -> 64 -> 32 -> 16 -> 8)
    _conv_bn(64, input_shape=(img_w, img_h, 3), data_format='channels_last')
    _conv_bn(64)
    model.add(MaxPooling2D(pool_size=(2, 2)))
    for filters, reps in ((128, 2), (256, 3), (512, 3), (512, 3)):
        for _ in range(reps):
            _conv_bn(filters)
        model.add(MaxPooling2D(pool_size=(2, 2)))

    # decoder: mirror of the encoder, upsampling back to full resolution
    for filters, reps in ((512, 3), (512, 3), (256, 3), (128, 2)):
        model.add(UpSampling2D(size=(2, 2)))
        for _ in range(reps):
            _conv_bn(filters)
    model.add(UpSampling2D(size=(2, 2)))
    # NOTE: the original also passed input_shape to this non-first layer;
    # Keras ignores input_shape after the first layer, so it was dropped
    # (the explicit data_format is kept for parity with the original).
    _conv_bn(64, data_format='channels_last')
    _conv_bn(64)

    # per-pixel classification head
    model.add(Conv2D(n_label, (1, 1), strides=(1, 1), padding='same'))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='sgd',
                  metrics=['accuracy'])
    model.summary()
    return model
Exemplo n.º 8
0
def tiny_yolo4lite_mobilenet_body(inputs,
                                  num_anchors,
                                  num_classes,
                                  alpha=1.0,
                                  use_spp=True):
    '''Create Tiny YOLO_v3 Lite MobileNet model CNN body in keras.'''
    # Headless MobileNet backbone with ImageNet weights.
    backbone = MobileNet(input_tensor=inputs,
                         weights='imagenet',
                         include_top=False,
                         alpha=alpha)

    # Backbone endpoints for a 416x416 input:
    #   conv_pw_13_relu -> 13 x 13 x (1024*alpha)  (coarse)
    #   conv_pw_11_relu -> 26 x 26 x (512*alpha)   (fine)
    feat_coarse = backbone.get_layer('conv_pw_13_relu').output
    feat_fine = backbone.get_layer('conv_pw_11_relu').output

    coarse_channels = int(1024 * alpha)
    fine_channels = int(512 * alpha)

    # Head on the coarse 13x13 feature map, optionally with SPP.
    x13 = DarknetConv2D_BN_Leaky(coarse_channels // 2, (1, 1))(feat_coarse)
    if use_spp:
        x13 = Spp_Conv2D_BN_Leaky(x13, coarse_channels // 2)

    # Top-down FPN merge: reduce channels, 2x upsample, fuse with fine map.
    up = DarknetConv2D_BN_Leaky(fine_channels // 2, (1, 1))(x13)
    up = UpSampling2D(2)(up)
    x26 = Concatenate()([up, feat_fine])
    x26 = Depthwise_Separable_Conv2D_BN_Leaky(filters=fine_channels,
                                              kernel_size=(3, 3),
                                              block_id_str='15')(x26)

    # Detection output at 26x26 (fine scale).
    y2 = DarknetConv2D(num_anchors * (num_classes + 5), (1, 1))(x26)

    # Bottom-up merge: stride-2 depthwise conv brings 26x26 back to 13x13.
    down = ZeroPadding2D(((1, 0), (1, 0)))(x26)
    down = Darknet_Depthwise_Separable_Conv2D_BN_Leaky(coarse_channels // 2,
                                                       (3, 3),
                                                       strides=(2, 2),
                                                       block_id_str='16')(down)
    x13 = Concatenate()([down, x13])
    x13 = Depthwise_Separable_Conv2D_BN_Leaky(filters=coarse_channels,
                                              kernel_size=(3, 3),
                                              block_id_str='17')(x13)

    # Detection output at 13x13 (coarse scale).
    y1 = DarknetConv2D(num_anchors * (num_classes + 5), (1, 1))(x13)

    return Model(inputs, [y1, y2])
Exemplo n.º 9
0
def yolo4lite_mobilenet_body(inputs, num_anchors, num_classes, alpha=1.0):
    '''Create YOLO_v4 Lite MobileNet model CNN body in keras.

    Builds a three-scale PANet-style head (top-down then bottom-up fusion)
    on top of a MobileNet backbone and returns a Model producing three
    detection outputs [y1 (13x13), y2 (26x26), y3 (52x52)] for a 416 input.
    '''
    mobilenet = MobileNet(input_tensor=inputs,
                          weights='imagenet',
                          include_top=False,
                          alpha=alpha)

    # input: 416 x 416 x 3
    # conv_pw_13_relu :13 x 13 x (1024*alpha)
    # conv_pw_11_relu :26 x 26 x (512*alpha)
    # conv_pw_5_relu : 52 x 52 x (256*alpha)

    # f1: 13 x 13 x (1024*alpha) for 416 input
    f1 = mobilenet.get_layer('conv_pw_13_relu').output
    # f2: 26 x 26 x (512*alpha) for 416 input
    f2 = mobilenet.get_layer('conv_pw_11_relu').output
    # f3: 52 x 52 x (256*alpha) for 416 input
    f3 = mobilenet.get_layer('conv_pw_5_relu').output

    # Channel budgets scale with the MobileNet width multiplier.
    f1_channel_num = int(1024 * alpha)
    f2_channel_num = int(512 * alpha)
    f3_channel_num = int(256 * alpha)

    #feature map 1 head (13 x 13 x (512*alpha) for 416 input)
    # SPP head on the coarsest map (depthwise-separable variant).
    x1 = make_yolo_spp_depthwise_separable_head(f1,
                                                f1_channel_num // 2,
                                                block_id_str='14')

    #upsample fpn merge for feature map 1 & 2 (top-down path)
    x1_upsample = compose(DarknetConv2D_BN_Leaky(f2_channel_num // 2, (1, 1)),
                          UpSampling2D(2))(x1)

    x2 = DarknetConv2D_BN_Leaky(f2_channel_num // 2, (1, 1))(f2)
    x2 = Concatenate()([x2, x1_upsample])

    #feature map 2 head (26 x 26 x (256*alpha) for 416 input)
    x2 = make_yolo_depthwise_separable_head(x2,
                                            f2_channel_num // 2,
                                            block_id_str='15')

    #upsample fpn merge for feature map 2 & 3 (top-down path)
    x2_upsample = compose(DarknetConv2D_BN_Leaky(f3_channel_num // 2, (1, 1)),
                          UpSampling2D(2))(x2)

    x3 = DarknetConv2D_BN_Leaky(f3_channel_num // 2, (1, 1))(f3)
    x3 = Concatenate()([x3, x2_upsample])

    #feature map 3 head & output (52 x 52 x (256*alpha) for 416 input)
    #x3, y3 = make_depthwise_separable_last_layers(x3, f3_channel_num//2, num_anchors*(num_classes+5), block_id_str='16')
    x3 = make_yolo_depthwise_separable_head(x3,
                                            f3_channel_num // 2,
                                            block_id_str='16')
    y3 = compose(
        Depthwise_Separable_Conv2D_BN_Leaky(f3_channel_num, (3, 3),
                                            block_id_str='16_3'),
        DarknetConv2D(num_anchors * (num_classes + 5), (1, 1)))(x3)

    #downsample fpn merge for feature map 3 & 2 (bottom-up / PAN path)
    # ZeroPadding + stride-2 depthwise conv halves the spatial size.
    x3_downsample = compose(
        ZeroPadding2D(((1, 0), (1, 0))),
        Darknet_Depthwise_Separable_Conv2D_BN_Leaky(f2_channel_num // 2,
                                                    (3, 3),
                                                    strides=(2, 2),
                                                    block_id_str='16_4'))(x3)

    x2 = Concatenate()([x3_downsample, x2])

    #feature map 2 output (26 x 26 x (512*alpha) for 416 input)
    #x2, y2 = make_depthwise_separable_last_layers(x2, f2_channel_num//2, num_anchors*(num_classes+5), block_id_str='17')
    x2 = make_yolo_depthwise_separable_head(x2,
                                            f2_channel_num // 2,
                                            block_id_str='17')
    y2 = compose(
        Depthwise_Separable_Conv2D_BN_Leaky(f2_channel_num, (3, 3),
                                            block_id_str='17_3'),
        DarknetConv2D(num_anchors * (num_classes + 5), (1, 1)))(x2)

    #downsample fpn merge for feature map 2 & 1 (bottom-up / PAN path)
    x2_downsample = compose(
        ZeroPadding2D(((1, 0), (1, 0))),
        Darknet_Depthwise_Separable_Conv2D_BN_Leaky(f1_channel_num // 2,
                                                    (3, 3),
                                                    strides=(2, 2),
                                                    block_id_str='17_4'))(x2)

    x1 = Concatenate()([x2_downsample, x1])

    #feature map 1 output (13 x 13 x (1024*alpha) for 416 input)
    #x1, y1 = make_depthwise_separable_last_layers(x1, f1_channel_num//2, num_anchors*(num_classes+5), block_id_str='18')
    x1 = make_yolo_depthwise_separable_head(x1,
                                            f1_channel_num // 2,
                                            block_id_str='18')
    y1 = compose(
        Depthwise_Separable_Conv2D_BN_Leaky(f1_channel_num, (3, 3),
                                            block_id_str='18_3'),
        DarknetConv2D(num_anchors * (num_classes + 5), (1, 1)))(x1)

    return Model(inputs, [y1, y2, y3])
Exemplo n.º 10
0
 def deconv2d_block(input):
     # Decoder step: 3x3 conv with 256 filters, then 2x upsampling, then a
     # PReLU whose parameters are shared across the spatial axes.
     # (Parameter name `input` shadows the builtin but is kept for interface
     # compatibility with existing callers.)
     features = Conv2D(256, kernel_size=3, strides=1, padding='same')(input)
     upsampled = UpSampling2D(size=2)(features)
     return PReLU(alpha_initializer='zeros', shared_axes=[1, 2])(upsampled)
 def final_layer(self, x):
     # Output head: 2x upsample, 1x1 no-bias conv down to one channel,
     # batch-norm over the channel axis, then per-pixel sigmoid scores.
     upsampled = UpSampling2D(size=(2, 2))(x)
     logits = Conv2D(1, 1, use_bias=False,
                     kernel_initializer='he_normal')(upsampled)
     normalized = BatchNormalization(axis=3)(logits)
     return Activation('sigmoid', name='Classification')(normalized)
Exemplo n.º 12
0
def build_model(color_mode):
    """Build a convolutional autoencoder for SHAPE-sized images.

    Args:
        color_mode: "grayscale" (1 channel) or "rgb" (3 channels).

    Returns:
        An uncompiled Keras ``Model`` mapping images to reconstructions.

    Raises:
        ValueError: if ``color_mode`` is neither "grayscale" nor "rgb".
    """
    # set channels; fail fast on unknown modes (the original fell through
    # with `channels` unbound, raising a confusing UnboundLocalError later)
    if color_mode == "grayscale":
        channels = 1
    elif color_mode == "rgb":
        channels = 3
    else:
        raise ValueError(
            "color_mode must be 'grayscale' or 'rgb', got %r" % (color_mode,))
    img_dim = (*SHAPE, channels)

    def _conv_unit(tensor, filters):
        # Shared conv stage: 5x5 'same' conv (L2-regularized) + BN + LeakyReLU.
        out = Conv2D(filters, (5, 5),
                     padding="same",
                     kernel_regularizer=regularizers.l2(1e-6))(tensor)
        out = BatchNormalization()(out)
        return LeakyReLU(alpha=0.1)(out)

    # input
    input_img = Input(shape=img_dim)

    # encoder: six conv stages, each halving the spatial size via max pooling
    encoding_dim = 64  # 128
    x = input_img
    for filters in (32, 32, 64, 64, 128, 128):
        x = _conv_unit(x, filters)
        x = MaxPooling2D((2, 2), padding="same")(x)

    # dense bottleneck
    x = Flatten()(x)
    x = Dense(encoding_dim, kernel_regularizer=regularizers.l2(1e-6))(x)
    x = LeakyReLU(alpha=0.1)(x)
    encoded = x

    # decoder: mirror of the encoder, doubling spatial size via upsampling
    x = Reshape((4, 4, encoding_dim // 16))(x)
    for filters in (128, 128, 64, 64, 32, 32):
        x = _conv_unit(x, filters)
        x = UpSampling2D((2, 2))(x)

    # reconstruction head: back to the input channel count, sigmoid range
    x = Conv2D(img_dim[2], (5, 5),
               padding="same",
               kernel_regularizer=regularizers.l2(1e-6))(x)
    x = BatchNormalization()(x)
    decoded = Activation("sigmoid")(x)

    # model
    autoencoder = Model(input_img, decoded)
    return autoencoder
Exemplo n.º 13
0
    def model(self):
        """Build and compile a small U-Net with average-pooled encoder and
        upsample-and-concatenate decoder.

        Uses ``self.map_size`` and ``self.channels`` (input/output channel
        counts) and ``self.learning_rate`` (None selects the default Adam).

        Returns:
            A compiled Keras ``Model`` with MSE loss.
        """
        input_img = Input(shape=(self.map_size, self.map_size,
                                 self.channels[0]))

        # Encoder level 1: 16 filters at full resolution.
        x1 = Conv2D(16,
                    3,
                    activation='relu',
                    padding='same',
                    kernel_initializer='he_normal')(input_img)
        x1 = BatchNormalization()(x1)

        # Encoder level 2: /2 resolution, 32 filters.
        pool1 = AveragePooling2D(pool_size=(2, 2))(x1)
        x2 = Conv2D(32,
                    3,
                    activation='relu',
                    padding='same',
                    kernel_initializer='he_normal')(pool1)
        x2 = BatchNormalization()(x2)

        # Encoder level 3: /4 resolution, 64 filters.
        pool2 = AveragePooling2D(pool_size=(2, 2))(x2)
        x3 = Conv2D(64,
                    3,
                    activation='relu',
                    padding='same',
                    kernel_initializer='he_normal')(pool2)
        x3 = BatchNormalization()(x3)

        # Encoder level 4: /8 resolution, 64 filters.
        pool3 = AveragePooling2D(pool_size=(2, 2))(x3)
        x4 = Conv2D(64,
                    3,
                    activation='relu',
                    padding='same',
                    kernel_initializer='he_normal')(pool3)
        x4 = BatchNormalization()(x4)

        # Bottleneck: /16 resolution.
        pool_deep = AveragePooling2D(pool_size=(2, 2))(x4)
        xdeep = Conv2D(64,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(pool_deep)
        xdeep = BatchNormalization()(xdeep)

        # Decoder: upsample and concatenate the matching encoder feature map
        # (skip connection) along the channel axis at each level.
        updeep = UpSampling2D((2, 2))(xdeep)
        mergedeep = concatenate([x4, updeep], axis=3)

        xdeep2 = Conv2D(64,
                        3,
                        activation='relu',
                        padding='same',
                        kernel_initializer='he_normal')(mergedeep)
        xdeep2 = BatchNormalization()(xdeep2)

        up5 = UpSampling2D((2, 2))(xdeep2)
        merge5 = concatenate([x3, up5], axis=3)
        merge5 = BatchNormalization()(merge5)

        x5 = Conv2D(64,
                    3,
                    activation='relu',
                    padding='same',
                    kernel_initializer='he_normal')(merge5)

        up6 = UpSampling2D((2, 2))(x5)
        merge6 = concatenate([x2, up6], axis=3)
        merge6 = BatchNormalization()(merge6)

        x6 = Conv2D(32,
                    3,
                    activation='relu',
                    padding='same',
                    kernel_initializer='he_normal')(merge6)

        up7 = UpSampling2D((2, 2))(x6)
        merge7 = concatenate([x1, up7], axis=3)
        merge7 = BatchNormalization()(merge7)

        x7 = Conv2D(16,
                    3,
                    activation='relu',
                    padding='same',
                    kernel_initializer='he_normal')(merge7)
        # 1x1 conv to the requested output channel count, sigmoid range.
        output = Conv2D(self.channels[1], 1, activation='sigmoid')(x7)

        unet = Model(input_img, output)
        unet.summary()

        # NOTE(review): `Adam(lr=...)` is the legacy keyword (newer Keras uses
        # `learning_rate=`); kept as-is to match the Keras version this file
        # targets — confirm before upgrading.
        if self.learning_rate is None:
            unet.compile(optimizer='adam', loss='mse')
        else:
            unet.compile(optimizer=Adam(lr=self.learning_rate), loss='mse')

        return unet
Exemplo n.º 14
0
# 4, 4, 8
autoencoder.add(
    Conv2D(filters=8,
           kernel_size=(3, 3),
           activation='relu',
           padding='same',
           strides=(2, 2)))

autoencoder.add(Flatten())

autoencoder.add(Reshape((4, 4, 8)))

# Decoder
autoencoder.add(
    Conv2D(filters=8, kernel_size=(3, 3), activation='relu', padding='same'))
autoencoder.add(UpSampling2D(size=(2, 2)))
autoencoder.add(
    Conv2D(filters=8, kernel_size=(3, 3), activation='relu', padding='same'))
autoencoder.add(UpSampling2D(size=(2, 2)))
autoencoder.add(Conv2D(filters=16, kernel_size=(3, 3), activation='relu'))
autoencoder.add(UpSampling2D(size=(2, 2)))
autoencoder.add(
    Conv2D(filters=1, kernel_size=(3, 3), activation='sigmoid',
           padding='same'))
autoencoder.summary()

autoencoder.compile(optimizer='adam',
                    loss='binary_crossentropy',
                    metrics=['accuracy'])
autoencoder.fit(previsores_treinamento,
                previsores_treinamento,
Exemplo n.º 15
0
def yoloNano(anchors,
             input_size=416,
             include_attention=True,
             num_classes=1,
             expension=.75,
             decay=0.0005):
    #f**k tensorflow 2.x
    #backbone

    input_0 = Input(shape=(None, None, 3))
    input_gt = [
        Input(shape=(None, None, len(anchors) // 3, num_classes + 5))
        for l in range(3)
    ]

    x = Conv2D(filters=12,
               strides=(1, 1),
               kernel_size=(3, 3),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(input_0)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = Conv2D(filters=24,
               strides=(2, 2),
               kernel_size=(3, 3),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x_0 = LeakyReLU(alpha=0.1)(x)
    #PEP(7)(208x208x24)
    x = Conv2D(filters=7,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x_0)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = Conv2D(filters=math.ceil(24 * expension),
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = DepthwiseConv2D(strides=(1, 1),
                        kernel_size=(3, 3),
                        use_bias=False,
                        padding='same',
                        kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = Conv2D(filters=24,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = Add()([x_0, x])
    #EP(104x104x70)
    x = Conv2D(filters=math.ceil(70 * expension),
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = DepthwiseConv2D(strides=(2, 2),
                        kernel_size=(3, 3),
                        use_bias=False,
                        padding='same',
                        kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = Conv2D(filters=70,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x_1 = BatchNormalization()(x)
    #PEP(25)(104x104x70)
    x = Conv2D(filters=25,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x_1)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = Conv2D(filters=math.ceil(70 * expension),
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = DepthwiseConv2D(strides=(1, 1),
                        kernel_size=(3, 3),
                        use_bias=False,
                        padding='same',
                        kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = Conv2D(filters=70,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x_2 = Add()([x_1, x])
    # PEP(24)(104x104x70)
    x = Conv2D(filters=24,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x_2)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = Conv2D(filters=math.ceil(70 * expension),
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = DepthwiseConv2D(strides=(1, 1),
                        kernel_size=(3, 3),
                        use_bias=False,
                        padding='same',
                        kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = Conv2D(filters=70,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = Add()([x_2, x])
    # EP(52x52x150)
    x = Conv2D(filters=math.ceil(150 * expension),
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = DepthwiseConv2D(strides=(2, 2),
                        kernel_size=(3, 3),
                        use_bias=False,
                        padding='same',
                        kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = Conv2D(filters=150,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x_3 = BatchNormalization()(x)
    # PEP(56)(52x52x150)
    x = Conv2D(filters=56,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x_3)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = Conv2D(filters=math.ceil(150 * expension),
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = DepthwiseConv2D(strides=(1, 1),
                        kernel_size=(3, 3),
                        use_bias=False,
                        padding='same',
                        kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = Conv2D(filters=150,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = Add()([x_3, x])
    #Conv1x1
    x = Conv2D(filters=150,
               kernel_size=(1, 1),
               strides=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x_4 = LeakyReLU(alpha=0.1)(x)
    #FCA(8)
    if include_attention:
        x = GlobalAveragePooling2D()(x_4)
        x = Dense(units=150 // 8,
                  activation='relu',
                  use_bias=False,
                  kernel_regularizer=l2(l=decay))(x)
        x = Dense(units=150,
                  activation='sigmoid',
                  use_bias=False,
                  kernel_regularizer=l2(l=decay))(x)
        x_5 = Multiply()([x_4, x])
    else:
        x_5 = x_4
    #PEP(73)(52x52x150)
    x = Conv2D(filters=73,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x_5)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = Conv2D(filters=math.ceil(150 * expension),
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = DepthwiseConv2D(strides=(1, 1),
                        kernel_size=(3, 3),
                        use_bias=False,
                        padding='same',
                        kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = Conv2D(filters=150,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x_6 = Add()([x_5, x])
    # PEP(71)(52x52x150)
    x = Conv2D(filters=71,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x_6)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = Conv2D(filters=math.ceil(150 * expension),
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = DepthwiseConv2D(strides=(1, 1),
                        kernel_size=(3, 3),
                        use_bias=False,
                        padding='same',
                        kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = Conv2D(filters=150,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x_7 = Add()([x_6, x])
    # PEP(75)(52x52x150)
    x = Conv2D(filters=75,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x_7)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = Conv2D(filters=math.ceil(150 * expension),
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = DepthwiseConv2D(strides=(1, 1),
                        kernel_size=(3, 3),
                        use_bias=False,
                        padding='same',
                        kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = Conv2D(filters=150,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x_8 = Add()([x_7, x])  #output 52x52x150
    #EP(26x26x325)
    x = Conv2D(filters=math.ceil(325 * expension),
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x_8)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = DepthwiseConv2D(strides=(2, 2),
                        kernel_size=(3, 3),
                        use_bias=False,
                        padding='same',
                        kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = Conv2D(filters=325,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x_9 = BatchNormalization()(x)
    # PEP(132)(26x26x325)
    x = Conv2D(filters=132,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x_9)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = Conv2D(filters=math.ceil(325 * expension),
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = DepthwiseConv2D(strides=(1, 1),
                        kernel_size=(3, 3),
                        use_bias=False,
                        padding='same',
                        kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = Conv2D(filters=325,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x_10 = Add()([x_9, x])
    # PEP(124)(26x26x325)
    x = Conv2D(filters=124,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x_10)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = Conv2D(filters=math.ceil(325 * expension),
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = DepthwiseConv2D(strides=(1, 1),
                        kernel_size=(3, 3),
                        use_bias=False,
                        padding='same',
                        kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = Conv2D(filters=325,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x_11 = Add()([x_10, x])
    # PEP(141)(26x26x325)
    x = Conv2D(filters=141,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x_11)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = Conv2D(filters=math.ceil(325 * expension),
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = DepthwiseConv2D(strides=(1, 1),
                        kernel_size=(3, 3),
                        use_bias=False,
                        padding='same',
                        kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = Conv2D(filters=325,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x_12 = Add()([x_11, x])
    # PEP(140)(26x26x325)
    x = Conv2D(filters=140,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x_12)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = Conv2D(filters=math.ceil(325 * expension),
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = DepthwiseConv2D(strides=(1, 1),
                        kernel_size=(3, 3),
                        use_bias=False,
                        padding='same',
                        kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = Conv2D(filters=325,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x_13 = Add()([x_12, x])
    # PEP(137)(26x26x325)
    x = Conv2D(filters=137,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x_13)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = Conv2D(filters=math.ceil(325 * expension),
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = DepthwiseConv2D(strides=(1, 1),
                        kernel_size=(3, 3),
                        use_bias=False,
                        padding='same',
                        kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = Conv2D(filters=325,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x_14 = Add()([x_13, x])
    # PEP(135)(26x26x325)
    x = Conv2D(filters=135,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x_14)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = Conv2D(filters=math.ceil(325 * expension),
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = DepthwiseConv2D(strides=(1, 1),
                        kernel_size=(3, 3),
                        use_bias=False,
                        padding='same',
                        kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = Conv2D(filters=325,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x_15 = Add()([x_14, x])
    # PEP(133)(26x26x325)
    x = Conv2D(filters=133,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x_15)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = Conv2D(filters=math.ceil(325 * expension),
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = DepthwiseConv2D(strides=(1, 1),
                        kernel_size=(3, 3),
                        use_bias=False,
                        padding='same',
                        kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = Conv2D(filters=325,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x_16 = Add()([x_15, x])
    # PEP(140)(26x26x325)
    x = Conv2D(filters=140,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x_16)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = Conv2D(filters=math.ceil(325 * expension),
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = DepthwiseConv2D(strides=(1, 1),
                        kernel_size=(3, 3),
                        use_bias=False,
                        padding='same',
                        kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = Conv2D(filters=325,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x_17 = Add()([x_16, x])  #output 26x26x325
    # EP(13x13x545)
    x = Conv2D(filters=math.ceil(545 * expension),
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x_17)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = DepthwiseConv2D(strides=(2, 2),
                        kernel_size=(3, 3),
                        use_bias=False,
                        padding='same',
                        kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = Conv2D(filters=545,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x_18 = BatchNormalization()(x)
    # PEP(276)(13x13x545)
    x = Conv2D(filters=276,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x_18)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = Conv2D(filters=math.ceil(545 * expension),
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = DepthwiseConv2D(strides=(1, 1),
                        kernel_size=(3, 3),
                        use_bias=False,
                        padding='same',
                        kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = Conv2D(filters=545,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x_19 = Add()([x_18, x])
    #Conv1x1
    x = Conv2D(filters=230,
               kernel_size=(1, 1),
               strides=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x_19)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    # EP(13x13x489)
    x = Conv2D(filters=math.ceil(489 * expension),
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = DepthwiseConv2D(strides=(1, 1),
                        kernel_size=(3, 3),
                        use_bias=False,
                        padding='same',
                        kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = Conv2D(filters=489,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    # PEP(213)(13x13x469)
    x = Conv2D(filters=213,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = Conv2D(filters=math.ceil(469 * expension),
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = DepthwiseConv2D(strides=(1, 1),
                        kernel_size=(3, 3),
                        use_bias=False,
                        padding='same',
                        kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = Conv2D(filters=469,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    # Conv1x1
    x = Conv2D(filters=189,
               kernel_size=(1, 1),
               strides=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x_20 = LeakyReLU(alpha=0.1)(x)  #output 13x13x189
    # EP(13x13x462)
    x = Conv2D(filters=math.ceil(462 * expension),
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x_20)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = DepthwiseConv2D(strides=(1, 1),
                        kernel_size=(3, 3),
                        use_bias=False,
                        padding='same',
                        kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = Conv2D(filters=462,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    # feature 13x13x[(num_classes+5)x3]
    feature_13x13 = Conv2D(filters=3 * (num_classes + 5),
                           kernel_size=(1, 1),
                           strides=(1, 1),
                           use_bias=False,
                           padding='same',
                           kernel_regularizer=l2(l=decay))(x)
    # Conv1x1
    x = Conv2D(filters=105,
               kernel_size=(1, 1),
               strides=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x_20)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    # upsampling 26x26x105
    x = UpSampling2D()(x)
    # concatenate
    x = Concatenate()([x, x_17])
    # PEP(113)(26x26x325)
    x = Conv2D(filters=113,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = Conv2D(filters=math.ceil(325 * expension),
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = DepthwiseConv2D(strides=(1, 1),
                        kernel_size=(3, 3),
                        use_bias=False,
                        padding='same',
                        kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = Conv2D(filters=325,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    # PEP(99)(26x26x207)
    x = Conv2D(filters=99,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = Conv2D(filters=math.ceil(207 * expension),
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = DepthwiseConv2D(strides=(1, 1),
                        kernel_size=(3, 3),
                        use_bias=False,
                        padding='same',
                        kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = Conv2D(filters=207,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    # Conv1x1
    x = Conv2D(filters=98,
               kernel_size=(1, 1),
               strides=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x_21 = LeakyReLU(alpha=0.1)(x)
    # EP(13x13x183)
    x = Conv2D(filters=math.ceil(183 * expension),
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x_21)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = DepthwiseConv2D(strides=(1, 1),
                        kernel_size=(3, 3),
                        use_bias=False,
                        padding='same',
                        kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = Conv2D(filters=183,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    # feature 26x26x[(num_classes+5)x3]
    feature_26x26 = Conv2D(filters=3 * (num_classes + 5),
                           kernel_size=(1, 1),
                           strides=(1, 1),
                           use_bias=False,
                           padding='same',
                           kernel_regularizer=l2(l=decay))(x)
    # Conv1x1
    x = Conv2D(filters=47,
               kernel_size=(1, 1),
               strides=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x_21)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    #upsampling
    x = UpSampling2D()(x)
    #concatenate
    x = Concatenate()([x, x_8])
    # PEP(58)(52x52x132)
    x = Conv2D(filters=58,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = Conv2D(filters=math.ceil(132 * expension),
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = DepthwiseConv2D(strides=(1, 1),
                        kernel_size=(3, 3),
                        use_bias=False,
                        padding='same',
                        kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = Conv2D(filters=132,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    # PEP(52)(52x52x87)
    x = Conv2D(filters=52,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = Conv2D(filters=math.ceil(87 * expension),
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = DepthwiseConv2D(strides=(1, 1),
                        kernel_size=(3, 3),
                        use_bias=False,
                        padding='same',
                        kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = Conv2D(filters=87,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    # PEP(47)(52x52x93)
    x = Conv2D(filters=47,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = Conv2D(filters=math.ceil(93 * expension),
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = DepthwiseConv2D(strides=(1, 1),
                        kernel_size=(3, 3),
                        use_bias=False,
                        padding='same',
                        kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = Conv2D(filters=93,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    feature_52x52 = Conv2D(filters=3 * (num_classes + 5),
                           kernel_size=(1, 1),
                           strides=(1, 1),
                           use_bias=False,
                           padding='same',
                           kernel_regularizer=l2(l=decay))(x)
    #loss layer
    loss = Lambda(yolo_loss,
                  output_shape=(1, ),
                  name='yolo_loss',
                  arguments={
                      'anchors': anchors,
                      'num_classes': num_classes,
                      'ignore_thresh': 0.5
                  })([feature_13x13, feature_26x26, feature_52x52, *input_gt])

    debug_model = tf.keras.Model(
        inputs=input_0, outputs=[feature_13x13, feature_26x26, feature_52x52])
    train_model = tf.keras.Model(inputs=[input_0, *input_gt], outputs=loss)
    return train_model, debug_model


# import numpy as np
# anchors = np.array([[6.,9.],[8.,13.],[11.,16.],[14.,22.],[17.,37.],[21.,26.],[29.,38.],[39.,62.],[79.,99.]],dtype='float32')
# model,_ = yoloNano(anchors,input_size=416,num_classes=1)
# model.summary()
def build_model(color_mode, filters=(32, 64, 128)):
    """Build a convolutional autoencoder with inception-style blocks.

    The encoder applies each filter width twice (inception block followed by a
    2x2 max-pool), and the decoder mirrors it (inception block followed by a
    2x2 upsample), so the output spatial size matches the input.

    Args:
        color_mode: "grayscale" (1 channel) or "rgb" (3 channels).
        filters: three filter widths for the encoder/decoder stages
            (default unchanged; a tuple to avoid a mutable default).

    Returns:
        An uncompiled Keras Model mapping images of shape (*SHAPE, channels)
        to reconstructions of the same shape with sigmoid activations.

    Raises:
        ValueError: if color_mode is not "grayscale" or "rgb" (previously
            this fell through and raised a confusing NameError on `channels`).
    """
    # set channels
    if color_mode == "grayscale":
        channels = 1
    elif color_mode == "rgb":
        channels = 3
    else:
        raise ValueError(
            f"color_mode must be 'grayscale' or 'rgb', got {color_mode!r}")
    # NOTE(review): SHAPE is a module-level constant (H, W) defined elsewhere.
    img_dim = (*SHAPE, channels)

    # input
    input_img = Input(shape=img_dim)

    # encoder: each stage halves the spatial dimensions
    x = input_img
    for n_filters in (filters[0], filters[0], filters[1], filters[1],
                      filters[2], filters[2]):
        x = inception_layer(x, n_filters)
        x = MaxPooling2D((2, 2), strides=(2, 2), padding="same")(x)

    # decoder: mirror of the encoder, each stage doubles the spatial dimensions
    for n_filters in (filters[2], filters[2], filters[1], filters[1],
                      filters[0], filters[0]):
        x = inception_layer(x, n_filters)
        x = UpSampling2D((2, 2))(x)

    # project back to the input channel count and squash to [0, 1]
    x = Conv2D(img_dim[2], (3, 3),
               padding="same",
               kernel_regularizer=regularizers.l2(1e-6))(x)
    x = BatchNormalization()(x)
    x = Activation("sigmoid")(x)

    decoded = x
    # model
    autoencoder = Model(input_img, decoded)
    return autoencoder
# Exemplo n.º 17
# 0
def yoloNano(anchors, input_size=416, num_classes=1, include_attention=True):
    """Assemble the YOLO Nano detection network from PEP/EP building blocks.

    Builds three detection heads at strides 32, 16 and 8 (13x13, 26x26 and
    52x52 for a 416 input) and wires them into a Lambda-based loss layer.

    Args:
        anchors: flat anchor list; len(anchors) // 3 anchors per scale.
        input_size: nominal input resolution (the graph itself is
            fully convolutional and accepts variable sizes).
        num_classes: number of object classes.
        include_attention: when True, insert an FCA attention module
            after the first 150-filter conv1x1.

    Returns:
        (train_model, debug_model): the training model mapping
        [image, *ground-truth tensors] to the scalar loss, and a debug
        model mapping the image to the three raw feature maps.
    """
    out_channels = 3 * (num_classes + 5)  # per-scale prediction depth

    image_input = Input(shape=(None, None, 3))
    gt_inputs = [
        Input(shape=(None, None, len(anchors) // 3, num_classes + 5))
        for _ in range(3)
    ]

    # stem and stride-8 trunk
    x = conv3x3(filters=12, stride=(1, 1))(image_input)
    x = conv3x3(filters=24, stride=(2, 2))(x)
    x = PEP(filters=24, neck_filters=7)(x)
    x = EP(filters=70, stride=(2, 2))(x)
    x = PEP(filters=70, neck_filters=25)(x)
    x = PEP(filters=70, neck_filters=24)(x)
    x = EP(filters=150, stride=(2, 2))(x)
    x = PEP(filters=150, neck_filters=56)(x)
    x = conv1x1(filters=150)(x)
    if include_attention:
        x = FCA(reduction_ratio=8)(x)
    for neck in (73, 71):
        x = PEP(filters=150, neck_filters=neck)(x)
    branch_52 = PEP(filters=150, neck_filters=75)(x)  # skip for 52x52 head

    # stride-16 trunk
    x = EP(filters=325, stride=(2, 2))(branch_52)
    for neck in (132, 124, 141, 140, 137, 135, 133):
        x = PEP(filters=325, neck_filters=neck)(x)
    branch_26 = PEP(filters=325, neck_filters=140)(x)  # skip for 26x26 head

    # stride-32 trunk
    x = EP(filters=545, stride=(2, 2))(branch_26)
    x = PEP(filters=545, neck_filters=276)(x)
    x = conv1x1(filters=230)(x)
    x = EP(filters=489)(x)
    x = PEP(filters=469, neck_filters=213)(x)
    head_13 = conv1x1(filters=189)(x)

    # 13x13 detection head
    x = EP(filters=462)(head_13)
    feature_13x13 = conv1x1(filters=out_channels, bn=False)(x)

    # upsample to 26x26, fuse with the stride-16 skip
    x = conv1x1(filters=105)(head_13)
    x = UpSampling2D()(x)
    x = Concatenate()([x, branch_26])
    x = PEP(filters=325, neck_filters=113)(x)
    x = PEP(filters=207, neck_filters=99)(x)
    head_26 = conv1x1(filters=98)(x)

    # 26x26 detection head
    x = EP(filters=183)(head_26)
    feature_26x26 = conv1x1(filters=out_channels, bn=False)(x)

    # upsample to 52x52, fuse with the stride-8 skip
    x = conv1x1(filters=47)(head_26)
    x = UpSampling2D()(x)
    x = Concatenate()([x, branch_52])
    x = PEP(filters=122, neck_filters=58)(x)
    x = PEP(filters=87, neck_filters=52)(x)
    x = PEP(filters=93, neck_filters=47)(x)
    feature_52x52 = conv1x1(filters=out_channels, bn=False)(x)

    # loss as a Lambda layer over all three scales plus the ground truth
    loss = Lambda(yolo_loss,
                  output_shape=(1, ),
                  name='yolo_loss',
                  arguments={
                      'anchors': anchors,
                      'num_classes': num_classes,
                      'ignore_thresh': 0.5
                  })([feature_13x13, feature_26x26, feature_52x52, *gt_inputs])

    debug_model = tf.keras.Model(
        inputs=image_input,
        outputs=[feature_13x13, feature_26x26, feature_52x52])
    train_model = tf.keras.Model(inputs=[image_input, *gt_inputs],
                                 outputs=loss)
    return train_model, debug_model


# import numpy as np
# anchors = np.array([[6.,9.],[8.,13.],[11.,16.],[14.,22.],[17.,37.],[21.,26.],[29.,38.],[39.,62.],[79.,99.]],dtype='float32')
# model,_ = yoloNano(anchors,input_size=416,num_classes=1)
# model.summary()
def create(input_shape,
           drop_prob=0.1,
           reg=None,
           sigma=False,
           activation=tf.nn.relu,
           num_class=1,
           lam=1e-3,
           l=0.5):
    """Build a 4-level U-Net and return it with its construction options.

    Args:
        input_shape: input image shape (H, W, C).
        drop_prob: SpatialDropout2D rate applied after every encoder pool.
        reg: kernel/bias regularizer shared by all conv layers.
        sigma: when True, use Conv2DNormal for the output head (predicts a
            distribution) instead of a plain Conv2D.
        activation: activation for all internal conv layers.
        num_class: number of output channels.
        lam, l: recorded in the returned opts dict; not used in the graph.

    Returns:
        (model, opts): the Keras model and a dict of the arguments above.
    """
    # must be the first statement so opts captures exactly the arguments
    opts = locals().copy()

    concat_axis = 3
    inputs = tf.keras.layers.Input(shape=input_shape)
    # inputs_normalized = tf.multiply(inputs, 1/255.)

    conv = functools.partial(Conv2D,
                             activation=activation,
                             padding='same',
                             kernel_regularizer=reg,
                             bias_regularizer=reg)

    def down_stage(tensor, width):
        """Two 3x3 convs, then a dropout-regularized 2x2 max-pool.

        Returns (pre-pool features for the skip connection, pooled output).
        """
        skip = conv(width, (3, 3))(tensor)
        skip = conv(width, (3, 3))(skip)
        pooled = MaxPooling2D(pool_size=(2, 2))(skip)
        return skip, SpatialDropout2D(drop_prob)(pooled)

    def up_stage(tensor, skip, width):
        """Upsample 2x, crop the skip to match, concatenate, two 3x3 convs."""
        upsampled = UpSampling2D(size=(2, 2))(tensor)
        ch, cw = get_crop_shape(skip, upsampled)
        cropped = Cropping2D(cropping=(ch, cw))(skip)
        merged = concatenate([upsampled, cropped], axis=concat_axis)
        merged = conv(width, (3, 3))(merged)
        return conv(width, (3, 3))(merged)

    # contracting path
    skip1, x = down_stage(inputs, 32)
    skip2, x = down_stage(x, 64)
    skip3, x = down_stage(x, 128)
    skip4, x = down_stage(x, 256)

    # bottleneck
    x = conv(512, (3, 3))(x)
    x = conv(512, (3, 3))(x)

    # expanding path
    x = up_stage(x, skip4, 256)
    x = up_stage(x, skip3, 128)
    x = up_stage(x, skip2, 64)
    x = up_stage(x, skip1, 32)

    # pad back to the exact input spatial size before the output head
    ch, cw = get_crop_shape(inputs, x)
    x = ZeroPadding2D(padding=((ch[0], ch[1]), (cw[0], cw[1])))(x)
    if sigma:
        outputs = Conv2DNormal(num_class, (1, 1))(x)
    else:
        outputs = Conv2D(num_class, (1, 1))(x)

    # outputs = tf.multiply(outputs, 255.)
    model = tf.keras.models.Model(inputs=inputs, outputs=outputs)
    return model, opts
# Exemplo n.º 19
# 0
def build_model(input_shape):
    """Build a convolutional encoder/decoder (autoencoder-style) network.

    The input is zero-padded by one row/column so three 2x2 poolings divide
    evenly; the decoder mirrors the encoder with transposed convolutions and
    upsampling, and the final Cropping2D removes the initial padding.

    Args:
        input_shape: shape of one input sample, e.g. (H, W, C).

    Returns:
        An uncompiled keras Sequential model producing a 3-channel map the
        same spatial size as the input (linear output activation).

    Note: relies on the module-level constant `n_conv_features`.
    """
    model = Sequential()

    model.add(ZeroPadding2D(padding=((0, 1), (0, 1)), input_shape=input_shape))

    def _conv_pair(filters, kernel):
        # Two stacked same-padded ReLU convolutions.
        for _ in range(2):
            model.add(Conv2D(filters, kernel, activation='relu', padding="same"))

    def _deconv_pair(filters, kernel):
        # Two stacked same-padded ReLU transposed convolutions.
        for _ in range(2):
            model.add(
                Conv2DTranspose(filters, kernel, activation='relu', padding="same"))

    def _regularize():
        # Batch norm + light dropout after each stage.
        model.add(BatchNormalization())
        model.add(Dropout(0.25))

    # ----- encoder -----
    _conv_pair(n_conv_features, (5, 5))
    model.add(MaxPool2D(pool_size=(2, 2)))
    _regularize()

    _conv_pair(2 * n_conv_features, (5, 5))
    model.add(MaxPool2D(pool_size=(2, 2)))
    _regularize()

    _conv_pair(4 * n_conv_features, (3, 3))
    model.add(MaxPool2D(pool_size=(2, 2)))
    _regularize()

    # Deepest stage: no pooling.
    _conv_pair(8 * n_conv_features, (3, 3))
    _regularize()

    # ----- decoder -----
    _deconv_pair(8 * n_conv_features, (3, 3))
    _regularize()

    model.add(UpSampling2D(size=(2, 2)))
    # Extra column of padding so odd sizes round-trip through the pooling.
    model.add(ZeroPadding2D(padding=((0, 0), (0, 1))))

    _deconv_pair(4 * n_conv_features, (3, 3))
    model.add(UpSampling2D(size=(2, 2)))
    model.add(ZeroPadding2D(padding=((0, 0), (0, 1))))
    _regularize()

    _deconv_pair(2 * n_conv_features, (5, 5))
    model.add(UpSampling2D(size=(2, 2)))
    _regularize()

    _deconv_pair(n_conv_features, (5, 5))
    model.add(Dropout(0.25))

    # Produced matrix: 3-channel linear output, cropped back to input size.
    model.add(Conv2DTranspose(3, (5, 5), activation='linear', padding="same"))
    model.add(Cropping2D(cropping=((0, 1), (0, 1))))

    # Network summary.
    model.summary()

    return model
Exemplo n.º 20
0
def yolo4_body(inputs, num_anchors, num_classes):
    """Build the YOLOv4 detection body on top of a Darknet backbone.

    Args:
        inputs: model input tensor (Keras tensor).
        num_anchors: anchor boxes per grid cell; each head emits
            num_anchors * (num_classes + 5) channels
            (4 box coords + 1 objectness + class scores).
        num_classes: number of object classes.

    Returns:
        A Keras Model producing three raw detection maps
        [y19_output, y38_output, y76_output], coarsest to finest.
        NOTE(review): the y19/y38/y76 names imply 19x19/38x38/76x76 grids,
        i.e. a 608x608 input -- confirm against the training pipeline.
    """
    backbone = Model(inputs, darknet_body(inputs))

    # Neck, coarsest scale: 1x1/3x3/1x1 bottleneck on the backbone output.
    y19 = DarknetConv2D_BN_Leaky(512, (1, 1))(backbone.output)
    y19 = DarknetConv2D_BN_Leaky(1024, (3, 3))(y19)
    y19 = DarknetConv2D_BN_Leaky(512, (1, 1))(y19)
    # SPP block: parallel stride-1 max-pools with different windows,
    # concatenated with the input feature to enlarge the receptive field.
    maxpool1 = MaxPooling2D(pool_size=(13, 13), strides=(1, 1), padding='same')(y19)
    maxpool2 = MaxPooling2D(pool_size=(9, 9), strides=(1, 1), padding='same')(y19)
    maxpool3 = MaxPooling2D(pool_size=(5, 5), strides=(1, 1), padding='same')(y19)
    y19 = Concatenate()([maxpool1, maxpool2, maxpool3, y19])
    y19 = DarknetConv2D_BN_Leaky(512, (1, 1))(y19)
    y19 = DarknetConv2D_BN_Leaky(1024, (3, 3))(y19)
    y19 = DarknetConv2D_BN_Leaky(512, (1, 1))(y19)

    # Top-down path: reduce channels, upsample, merge with mid-level feature.
    y19_upsample = compose(DarknetConv2D_BN_Leaky(256, (1, 1)), UpSampling2D(2))(y19)

    # NOTE(review): hard-coded backbone layer indices (204, 131) assume a
    # specific darknet_body layer layout -- verify if the backbone changes.
    y38 = DarknetConv2D_BN_Leaky(256, (1, 1))(backbone.layers[204].output)
    y38 = Concatenate()([y38, y19_upsample])
    y38 = DarknetConv2D_BN_Leaky(256, (1, 1))(y38)
    y38 = DarknetConv2D_BN_Leaky(512, (3, 3))(y38)
    y38 = DarknetConv2D_BN_Leaky(256, (1, 1))(y38)
    y38 = DarknetConv2D_BN_Leaky(512, (3, 3))(y38)
    y38 = DarknetConv2D_BN_Leaky(256, (1, 1))(y38)

    # Continue top-down to the finest scale.
    y38_upsample = compose(DarknetConv2D_BN_Leaky(128, (1, 1)), UpSampling2D(2))(y38)

    y76 = DarknetConv2D_BN_Leaky(128, (1, 1))(backbone.layers[131].output)
    y76 = Concatenate()([y76, y38_upsample])
    y76 = DarknetConv2D_BN_Leaky(128, (1, 1))(y76)
    y76 = DarknetConv2D_BN_Leaky(256, (3, 3))(y76)
    y76 = DarknetConv2D_BN_Leaky(128, (1, 1))(y76)
    y76 = DarknetConv2D_BN_Leaky(256, (3, 3))(y76)
    y76 = DarknetConv2D_BN_Leaky(128, (1, 1))(y76)

    # Fine-scale detection head.
    y76_output = DarknetConv2D_BN_Leaky(256, (3, 3))(y76)
    y76_output = DarknetConv2D(num_anchors*(num_classes+5), (1, 1))(y76_output)

    # Bottom-up path aggregation: downsample fine features and re-fuse.
    y76_downsample = ZeroPadding2D(((1, 0), (1, 0)))(y76)
    y76_downsample = DarknetConv2D_BN_Leaky(256, (3, 3), strides=(2, 2))(y76_downsample)
    y38 = Concatenate()([y76_downsample, y38])
    y38 = DarknetConv2D_BN_Leaky(256, (1,1))(y38)
    y38 = DarknetConv2D_BN_Leaky(512, (3,3))(y38)
    y38 = DarknetConv2D_BN_Leaky(256, (1,1))(y38)
    y38 = DarknetConv2D_BN_Leaky(512, (3,3))(y38)
    y38 = DarknetConv2D_BN_Leaky(256, (1,1))(y38)

    # Mid-scale detection head.
    y38_output = DarknetConv2D_BN_Leaky(512, (3, 3))(y38)
    y38_output = DarknetConv2D(num_anchors*(num_classes+5), (1,1))(y38_output)

    y38_downsample = ZeroPadding2D(((1, 0), (1, 0)))(y38)
    y38_downsample = DarknetConv2D_BN_Leaky(512, (3,3), strides=(2,2))(y38_downsample)
    y19 = Concatenate()([y38_downsample, y19])
    y19 = DarknetConv2D_BN_Leaky(512, (1, 1))(y19)
    y19 = DarknetConv2D_BN_Leaky(1024, (3, 3))(y19)
    y19 = DarknetConv2D_BN_Leaky(512, (1, 1))(y19)
    y19 = DarknetConv2D_BN_Leaky(1024, (3, 3))(y19)
    y19 = DarknetConv2D_BN_Leaky(512, (1, 1))(y19)

    # Coarse-scale detection head.
    y19_output = DarknetConv2D_BN_Leaky(1024, (3, 3))(y19)
    y19_output = DarknetConv2D(num_anchors*(num_classes+5), (1,1))(y19_output)

    yolo4_model = Model(inputs, [y19_output, y38_output, y76_output])

    return yolo4_model
                                                         alpha=1e-4,
                                                         beta=0.75)
# Frames encoder (continuation -- earlier layers and the first local
# response normalization are defined before this excerpt).
pool_2_frames = MaxPooling2D((2, 2))(batch_norm_2_frames)
#Layer3
conv_3_frames = Conv2D(32, (3, 3), activation='relu',
                       padding='same')(pool_2_frames)
#Layer4
conv_4_frames = Conv2D(32, (3, 3), activation='relu',
                       padding='same')(conv_3_frames)
#Layer5
conv_5_frames = Conv2D(32, (3, 3), activation='relu',
                       padding='same')(conv_4_frames)
pool_3_frames = MaxPooling2D((2, 2))(conv_5_frames)

#Frames Decoder
# Mirrors the encoder: upsample each pooling stage, then undo the
# convolutions with transposed convolutions.
pool_3_de_frames = UpSampling2D((2, 2))(pool_3_frames)
conv_5_de_frames = Conv2DTranspose(32, (3, 3),
                                   activation='relu',
                                   padding='same')(pool_3_de_frames)
conv_4_de_frames = Conv2DTranspose(32, (3, 3),
                                   activation='relu',
                                   padding='same')(conv_5_de_frames)
conv_3_de_frames = Conv2DTranspose(32, (3, 3),
                                   activation='relu',
                                   padding='same')(conv_4_de_frames)
pool_2_de_frames = UpSampling2D((2, 2))(conv_3_de_frames)
conv_2_de_frames = Conv2DTranspose(32, (3, 3),
                                   activation='relu',
                                   padding='same')(pool_2_de_frames)
batch_norm_2_frames = tf.nn.local_response_normalization(conv_2_de_frames,
                                                         depth_radius=5,
Exemplo n.º 22
0
def conrec_model(input_shape=(256, 256, 1),
                 basemap=32,
                 activation='sigmoid',
                 depth=4,
                 p_dropout=None,
                 batch_normalization=True,
                 projection_dim=128,
                 projection_head_layers=3,
                 skip_connections=None,
                 encoder_reduction=EncoderReduction.GA_POOLING,
                 decoder_type=DecoderType.UPSAMPLING,
                 sc_strength=1):
    """U-Net-style encoder/decoder with an extra contrastive output head.

    The encoder downsamples `depth` times, doubling the filter count at each
    level; skip tensors are tapped (and 1x1-projected with strength
    `sc_strength`) at every level but the bottleneck.  The reduced encoder
    output feeds a contrastive projection head; the decoder reconstructs the
    input through upsampling (or transposed convolutions) plus the skips.

    Returns a Model with outputs [reconstruction, contrastive projection].
    """

    def _downsample(inp):
        # 2x2 max-pool followed by optional dropout.
        pooled = MaxPooling2D(pool_size=(2, 2))(inp)
        if not p_dropout:
            return pooled
        return Dropout(p_dropout)(pooled)

    if skip_connections is None:
        skip_connections = depth - 1

    inputs = Input(input_shape)
    x = inputs
    skips = []

    # ----- encoder -----
    for d in range(depth):
        for _ in range(2):
            x = _create_convolution_block(
                input_layer=x,
                n_filters=basemap * (2 ** d),
                kernel=(3, 3),
                batch_normalization=batch_normalization,
                use_bias=True)
        if d == depth - 1:
            # Bottleneck level: no skip tap, no pooling.
            if p_dropout:
                x = Dropout(p_dropout)(x)
            continue
        # Named pass-through marker so the skip point is addressable.
        x = Layer(name='sc-' + str(d))(x)
        skips.append(_create_convolution_block(
            input_layer=x,
            n_filters=x.shape[-1] * sc_strength,
            kernel=(1, 1),
            batch_normalization=batch_normalization,
            use_bias=True))
        x = _downsample(x)

    reduced = reduce_encoder_output(encoder_output=x,
                                    encoder_reduction=encoder_reduction)
    reduced = Layer(name=ENCODER_OUTPUT_NAME)(reduced)

    con_output = add_contrastive_output(
        input=reduced,
        projection_dim=projection_dim,
        projection_head_layers=projection_head_layers)

    # ----- decoder -----
    for d in range(depth - 2, -1, -1):
        if decoder_type == DecoderType.TRANSPOSE:
            x = Conv2DTranspose(basemap * (2 ** d), (2, 2),
                                strides=(2, 2),
                                padding='same')(x)
        elif decoder_type == DecoderType.UPSAMPLING:
            x = UpSampling2D(size=(2, 2))(x)
        else:
            raise ValueError('Unknown decoder type')
        if skip_connections > d:
            x = concatenate([x, skips[d]], axis=3)
        else:
            print('No skip connection')
        for _ in range(2):
            x = _create_convolution_block(
                input_layer=x,
                n_filters=basemap * (2 ** d),
                kernel=(3, 3),
                batch_normalization=batch_normalization,
                use_bias=True)
        if p_dropout:
            x = Dropout(p_dropout)(x)

    reconstruction_out = Conv2D(input_shape[-1], (1, 1),
                                activation=activation,
                                name=RECONSTRUCTION_OUTPUT)(x)

    return Model(inputs, [reconstruction_out, con_output])
# Convolutional autoencoder for 32x32 RGB images (presumably CIFAR-10;
# x_test/x_train come from earlier notebook cells -- not visible here).
input_img = Input(shape=(32, 32, 3))
print('Number of testing images:', x_test.shape)

# In[3]:

# Encoder: three conv + 2x2 max-pool stages (32x32 -> 4x4 spatially).
x = Conv2D(64, (3, 3), activation='relu', padding='same')(input_img)
x = MaxPooling2D((2, 2), padding='same')(x)
x = Conv2D(128, (3, 3), activation='relu', padding='same')(x)
x = MaxPooling2D((2, 2), padding='same')(x)
x = Conv2D(256, (3, 3), activation='relu', padding='same')(x)
encoded = MaxPooling2D((2, 2), padding='same')(x)

# In[4]:

# Decoder: mirror of the encoder with upsampling; sigmoid output maps to
# [0, 1] pixel values.
x = Conv2D(256, (3, 3), activation='relu', padding='same')(encoded)
x = UpSampling2D((2, 2))(x)
x = Conv2D(128, (3, 3), activation='relu', padding='same')(x)
x = UpSampling2D((2, 2))(x)
x = Conv2D(64, (3, 3), activation='relu', padding='same')(x)
x = UpSampling2D((2, 2))(x)
decoded = Conv2D(3, (3, 3), activation='sigmoid', padding='same')(x)

# In[5]:

# Mean-absolute-error reconstruction objective.
autoencoder = Model(input_img, decoded)
autoencoder.compile(optimizer='adam', loss='mae')

# In[6]:

autoencoder.fit(x_train,
                x_train,
# U-Net continuation: conv_enc_2/conv_enc_3 and `initializer` are defined by
# the earlier encoder blocks, which are not visible in this excerpt.
# Block  encoder 4
max_pool_enc_4 = MaxPooling2D(pool_size=(2, 2))(conv_enc_3)
conv_enc_4 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = initializer)(max_pool_enc_4)
conv_enc_4 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = initializer)(conv_enc_4)
# -- Encoder -- #

# ----------- #
# Bottleneck: deepest features at 1024 filters.
maxpool = MaxPooling2D(pool_size=(2, 2))(conv_enc_4)
conv = Conv2D(1024, 3, activation = 'relu', padding = 'same', kernel_initializer = initializer)(maxpool)
conv = Conv2D(1024, 3, activation = 'relu', padding = 'same', kernel_initializer = initializer)(conv)
# ----------- #

# -- Decoder -- #
# Each decoder block: upsample, 2x2 conv, concatenate the matching encoder
# skip tensor, then two 3x3 convs.
# Block decoder 1
up_dec_1 = Conv2D(512, 2, activation = 'relu', padding = 'same', kernel_initializer = initializer)(UpSampling2D(size = (2,2))(conv))
merge_dec_1 = concatenate([conv_enc_4, up_dec_1], axis = 3)
conv_dec_1 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = initializer)(merge_dec_1)
conv_dec_1 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = initializer)(conv_dec_1)

# Block decoder 2
up_dec_2 = Conv2D(256, 2, activation = 'relu', padding = 'same', kernel_initializer = initializer)(UpSampling2D(size = (2,2))(conv_dec_1))
merge_dec_2 = concatenate([conv_enc_3, up_dec_2], axis = 3)
conv_dec_2 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = initializer)(merge_dec_2)
conv_dec_2 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = initializer)(conv_dec_2)

# Block decoder 3
up_dec_3 = Conv2D(128, 2, activation = 'relu', padding = 'same', kernel_initializer = initializer)(UpSampling2D(size = (2,2))(conv_dec_2))
merge_dec_3 = concatenate([conv_enc_2, up_dec_3], axis = 3)
conv_dec_3 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = initializer)(merge_dec_3)
conv_dec_3 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = initializer)(conv_dec_3)
Exemplo n.º 25
0
# Convolutional autoencoder over (SIZE, SIZE, 3) images; SIZE and img_array
# are defined earlier in the script (not visible in this excerpt).
model = Sequential()
model.add(
    Conv2D(32, (3, 3),
           activation='relu',
           padding='same',
           input_shape=(SIZE, SIZE, 3)))
model.add(MaxPooling2D((2, 2), padding='same'))
model.add(Conv2D(8, (3, 3), activation='relu', padding='same'))
model.add(MaxPooling2D((2, 2), padding='same'))
model.add(Conv2D(8, (3, 3), activation='relu', padding='same'))

model.add(MaxPooling2D((2, 2), padding='same'))

# Decoder: mirror of the encoder, upsampling back to the input resolution.
model.add(Conv2D(8, (3, 3), activation='relu', padding='same'))
model.add(UpSampling2D((2, 2)))
model.add(Conv2D(8, (3, 3), activation='relu', padding='same'))
model.add(UpSampling2D((2, 2)))
model.add(Conv2D(32, (3, 3), activation='relu', padding='same'))
model.add(UpSampling2D((2, 2)))
# NOTE(review): 'relu' on the reconstruction layer leaves the output
# unbounded above; 'sigmoid' is more usual for [0, 1]-scaled images --
# confirm how img_array is scaled.
model.add(Conv2D(3, (3, 3), activation='relu', padding='same'))

# NOTE(review): 'accuracy' is not a meaningful metric for MSE reconstruction.
model.compile(optimizer='adam',
              loss='mean_squared_error',
              metrics=['accuracy'])
model.summary()

# Train the network to reproduce its own input.
model.fit(img_array, img_array, epochs=5000, shuffle=True)

print("Neural network output")
pred = model.predict(img_array)
Exemplo n.º 26
0
from numpy import asarray

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import UpSampling2D

# Demo: upsample a small 2-D array with Keras UpSampling2D.
X = asarray([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
print(X)
nr = X.shape[0]  # number of rows
nc = X.shape[1]  # number of columns

# reshape input data into one sample a sample with a channel:
# (batch, rows, cols, channels)
X = X.reshape((1, nr, nc, 1))

# Nearest-neighbour upsampling (the default): each value becomes a 2x2 block.
model = Sequential()
model.add(UpSampling2D(input_shape=(nr, nc, 1)))  # nearest neighbor

yhat = model.predict(X)
yhat = yhat.reshape((2 * nr, 2 * nc))
print(yhat)

# Bilinear upsampling of the same input.
# BUG FIX: input_shape previously used (nc, nc, 1); the correct shape is
# (nr, nc, 1) -- the mistake was masked only because the demo array is square.
model = Sequential()
model.add(UpSampling2D(input_shape=(nr, nc, 1), interpolation='bilinear'))

yhat = model.predict(X)
yhat = yhat.reshape((2 * nr, 2 * nc))
print(yhat)
Exemplo n.º 27
0
# import mnist data and visualize first image
(X_train, y_train), (X_test, y_test) = mnist.load_data()

# scale data and reshape
X_train = X_train.astype('float32')

# Scale pixels from [0, 255] to [-1, 1], matching the generator's tanh output.
X_train = X_train / 127.5 - 1.

# Add the single grayscale channel: (N, 28, 28, 1).
X_train = X_train.reshape(-1,28,28,1)

# generator: 100-dim noise -> 7x7x128 -> upsampled twice to 28x28, 1 channel.
modelG = Sequential()
modelG.add(Dense(128 * 7 * 7, activation="relu", input_dim=100))
modelG.add(Reshape((7, 7, 128)))
modelG.add(UpSampling2D())
modelG.add(Conv2D(128, kernel_size=3, padding="same"))
modelG.add(BatchNormalization(momentum=0.8))
modelG.add(Activation("relu"))
modelG.add(UpSampling2D())
modelG.add(Conv2D(64, kernel_size=3, padding="same"))
modelG.add(BatchNormalization(momentum=0.8))
modelG.add(Activation("relu"))
modelG.add(Conv2D(1, kernel_size=3, padding="same"))
modelG.add(Activation("tanh"))

# discriminator (first layers only -- the rest lies beyond this excerpt).
modelD = Sequential()
modelD.add(Conv2D(32, kernel_size=3, strides=2, input_shape=(28,28,1), padding="same"))
modelD.add(LeakyReLU(alpha=0.2))
modelD.add(Dropout(0.25))
Exemplo n.º 28
0
def FashionMnist_classifier_middle_bn():
    """Build the coarse & middle hierarchical Fashion-MNIST classifier.

    Encoder: five named conv/BN/ReLU stages with max-pooling between the
    later ones.  Three softmax heads ('coarse', 'middle', 'fine') read
    globally-pooled features at increasing resolution; U-Net-style
    upsample+crop+concatenate paths feed the middle and fine heads.  The
    fine branch (conv2d_7 / fc3) is frozen (trainable=False).

    Returns:
        A Model with outputs [coarse, middle, fine, d1, d2, d3] where the
        d* tensors are the pooled feature vectors feeding each head.
    """
    input_img = Input(shape=(28, 28, 1), name="Input", dtype='float32')

    def _conv_bn_relu(x, filters, conv_name, bn_name, act_name, trainable=True):
        # One named 3x3 conv -> batch norm -> ReLU stage.
        x = Conv2D(filters, (3, 3), padding='same', name=conv_name,
                   trainable=trainable)(x)
        x = BatchNormalization(name=bn_name, trainable=trainable)(x)
        return Activation('relu', name=act_name)(x)

    # ----- encoder -----
    x1 = _conv_bn_relu(input_img, 2, "conv2d_1",
                       'batch_normalization', 'activation')
    x2 = _conv_bn_relu(x1, 4, "conv2d_2",
                       'batch_normalization_1', 'activation_1')
    x3 = _conv_bn_relu(
        MaxPooling2D(pool_size=(2, 2), name="max_pooling2d_1")(x2),
        8, "conv2d_3", 'batch_normalization_2', 'activation_2')
    x4 = _conv_bn_relu(
        MaxPooling2D(pool_size=(2, 2), name="max_pooling2d_2")(x3),
        16, "conv2d_4", 'batch_normalization_3', 'activation_3')
    x5 = _conv_bn_relu(
        MaxPooling2D(pool_size=(2, 2), name="max_pooling2d_3")(x4),
        32, "conv2d_5", 'batch_normalization_4', 'activation_4')

    # Coarse head on the deepest features.
    d1 = GlobalAveragePooling2D()(x5)
    res1 = Activation('softmax', name='coarse')(Dense(2, name="fc1")(d1))

    # Middle branch: upsample, crop the skip tensor to match, concatenate.
    up1 = UpSampling2D(size=(2, 2), name='up_sampling2d_1')(x5)
    crop4 = Cropping2D(cropping=((1, 0), (1, 0)))(x4)
    merged1 = Concatenate(name='concatenate_1', axis=3)([up1, crop4])
    x7 = _conv_bn_relu(merged1, 16, "conv2d_6",
                       'batch_normalization_5', 'activation_5')

    d2 = GlobalAveragePooling2D()(x7)
    res2 = Activation('softmax', name='middle')(
        Dense(4, name="fc2", trainable=True)(d2))

    # Fine branch (frozen weights).
    up2 = UpSampling2D(size=(2, 2), name='up_sampling2d_2')(x7)
    crop3 = Cropping2D(cropping=((1, 1), (1, 1)))(x3)
    merged2 = Concatenate(name='concatenate_2', axis=3)([up2, crop3])
    x9 = _conv_bn_relu(merged2, 16, "conv2d_7",
                       'batch_normalization_6', 'activation_6',
                       trainable=False)

    d3 = GlobalAveragePooling2D()(x9)
    res3 = Activation('softmax', name='fine')(
        Dense(10, name="fc3", trainable=False)(d3))

    return Model(inputs=input_img,
                 outputs=[res1, res2, res3, d1, d2, d3])
# Colors = 1 for grayscale
# Fashion MNIST is grayscale

# Number of distinct labels (not used by the autoencoder below).
classes = len(set(y_train))
print(classes)

# NOTE(review): bare expression -- only displays a value inside a notebook
# cell, no effect when run as a script.
X_train[0].shape

input_shape = X_train[0].shape

# Convolutional autoencoder: two pool stages down, two upsampling stages up.
i_layer = Input(shape=input_shape)
h_layer = Conv2D(16, (3, 3), activation='relu', padding='same')(i_layer)
h_layer = MaxPool2D((2, 2), padding='same')(h_layer)
h_layer = Conv2D(8, (3, 3), activation='relu', padding='same')(h_layer)
h_layer = MaxPool2D((2, 2), padding='same')(h_layer)
h_layer = UpSampling2D((2, 2))(h_layer)
h_layer = Conv2D(8, (3, 3), activation='relu', padding='same')(h_layer)
h_layer = UpSampling2D((2, 2))(h_layer)
h_layer = Conv2D(16, (3, 3), activation='relu', padding='same')(h_layer)
# Linear single-channel reconstruction output.
o_layer = Conv2D(1, (3, 3), activation=None, padding='same')(h_layer)
model = Model(i_layer, o_layer)

model.summary()

model.compile(optimizer='adam', loss="mse")

# Reconstruction training: inputs are also the targets.
report = model.fit(X_train, X_train, epochs=10, batch_size=200)

# Plot a random original image (its reconstruction is presumably drawn in
# ax[1] just past this excerpt).
idx = np.random.randint(0, len(X_train))
fig, ax = plt.subplots(1, 2, figsize=(10, 4))
ax[0].imshow(X_train[idx].reshape(28, 28), cmap='gray')
Exemplo n.º 30
0
def bifpn(features, out_channels, ids, training=True):
    """One BiFPN (bidirectional feature pyramid) fusion layer.

    Args:
        features: for ids == 0, backbone feature maps unpacked as
            (_, _, c3, c4, c5); otherwise the five outputs (p3..p7) of the
            previous BiFPN layer.
        out_channels: common channel width of the pyramid.
        ids: index of this BiFPN layer in the stack; 0 additionally builds
            the initial pyramid (1x1 projections of c3..c5 plus p6/p7
            synthesized by strided pooling).
        training: forwarded to every BatchNormalization call.

    Returns:
        Tuple (p3_out, p4_out, p5_out, p6_out, p7_out), finest to coarsest.
    """

    def _fuse_node(parts):
        # Standard BiFPN node: learned weighted add, Swish, separable conv,
        # batch norm.
        merged = Swish()(WeightAdd()(parts))
        merged = SeparableConv2D(out_channels, 3, padding='same')(merged)
        return BatchNormalization(momentum=MOMENTUM,
                                  epsilon=EPSILON)(merged, training=training)

    def _downsample(x):
        # Halve the spatial resolution with a stride-2 3x3 max-pool.
        return MaxPool2D(3, strides=2, padding='same')(x)

    def _upsample(x):
        return UpSampling2D(2)(x)

    if ids == 0:
        # First BiFPN layer: project backbone features to the common width
        # and synthesize the two extra coarse levels from c5.
        _, _, c3, c4, c5 = features

        def _project(c):
            p = Conv2D(out_channels, 1, 1, padding='same')(c)
            return BatchNormalization(momentum=MOMENTUM,
                                      epsilon=EPSILON)(p, training=training)

        p3 = _project(c3)
        p4 = _project(c4)
        p5 = _project(c5)
        p6 = _downsample(_project(c5))
        p7 = _downsample(p6)
    else:
        # Subsequent layers consume the previous layer's pyramid directly.
        p3, p4, p5, p6, p7 = features

    # Top-down pass: fuse each level with the upsampled coarser level.
    p6_middle = _fuse_node([p6, _upsample(p7)])
    p5_middle = _fuse_node([p5, _upsample(p6_middle)])
    p4_middle = _fuse_node([p4, _upsample(p5_middle)])
    p3_out = _fuse_node([p3, _upsample(p4_middle)])

    # Bottom-up (path aggregation) pass: fuse the original level, the
    # top-down intermediate, and the downsampled finer output.
    p4_out = _fuse_node([p4, p4_middle, _downsample(p3_out)])
    p5_out = _fuse_node([p5, p5_middle, _downsample(p4_out)])
    p6_out = _fuse_node([p6, p6_middle, _downsample(p5_out)])
    p7_out = _fuse_node([p7, _downsample(p6_out)])

    return p3_out, p4_out, p5_out, p6_out, p7_out