Example #1
    def build(self):
        # ======================================== INPUT ==========================================
        inBlock = Input(shape=(self.input_h, self.input_w, 3), dtype='float32')
        # Lambda layer: scale input before feeding it to the network
        inScaled = Lambda(lambda x: scale_input(x))(inBlock)
        # ======================================== ENCODER ========================================
        # Block 1d
        convB1d = Conv2D(64, (3, 3),
                         activation=self.activation,
                         kernel_initializer=self.kernel_init,
                         padding='same')(inScaled)
        convB1d = BatchNormalization()(convB1d)
        convB1d = Conv2D(64, (3, 3),
                         activation=self.activation,
                         kernel_initializer=self.kernel_init,
                         padding='same')(convB1d)
        convB1d = BatchNormalization()(convB1d)
        poolB1d = MaxPooling2D(pool_size=(2, 2))(convB1d)
        # Block 2d
        convB2d = Conv2D(128, (3, 3),
                         activation=self.activation,
                         kernel_initializer=self.kernel_init,
                         padding='same')(poolB1d)
        convB2d = BatchNormalization()(convB2d)
        convB2d = Conv2D(128, (3, 3),
                         activation=self.activation,
                         kernel_initializer=self.kernel_init,
                         padding='same')(convB2d)
        convB2d = BatchNormalization()(convB2d)
        poolB2d = MaxPooling2D(pool_size=(2, 2))(convB2d)
        # Block 3d
        convB3d = Conv2D(256, (3, 3),
                         activation=self.activation,
                         kernel_initializer=self.kernel_init,
                         padding='same')(poolB2d)
        convB3d = BatchNormalization()(convB3d)
        convB3d = Conv2D(256, (3, 3),
                         activation=self.activation,
                         kernel_initializer=self.kernel_init,
                         padding='same')(convB3d)
        convB3d = BatchNormalization()(convB3d)
        poolB3d = MaxPooling2D(pool_size=(2, 2))(convB3d)
        # Block 4d
        convB4d = Conv2D(512, (3, 3),
                         activation=self.activation,
                         kernel_initializer=self.kernel_init,
                         padding='same')(poolB3d)
        convB4d = BatchNormalization()(convB4d)
        convB4d = Conv2D(512, (3, 3),
                         activation=self.activation,
                         kernel_initializer=self.kernel_init,
                         padding='same')(convB4d)
        convB4d = BatchNormalization()(convB4d)
        poolB4d = MaxPooling2D(pool_size=(2, 2))(convB4d)

        # ===================================== BOTTLENECK ======================================
        # Implementation of Dense-Block
        stackBtN = poolB4d
        for i in range(self.dense_block_size):
            # DB: Dense-Block
            l = self.dense_block(stackBtN, self.dense_filter_size)
            stackBtN = concatenate([stackBtN, l])

        # ====================================== DECODER =======================================
        # Block 4u
        convB4u = Conv2DTranspose(512, (2, 2), strides=(2, 2),
                                  padding='same')(stackBtN)
        convB4u = concatenate([convB4u, convB4d])
        convB4u = Conv2D(512, (3, 3),
                         activation=self.activation,
                         kernel_initializer=self.kernel_init,
                         padding='same')(convB4u)
        convB4u = BatchNormalization()(convB4u)
        convB4u = Conv2D(256, (3, 3),
                         activation=self.activation,
                         kernel_initializer=self.kernel_init,
                         padding='same')(convB4u)
        convB4u = BatchNormalization()(convB4u)
        # Block 3u
        convB3u = Conv2DTranspose(256, (2, 2), strides=(2, 2),
                                  padding='same')(convB4u)
        convB3u = concatenate([convB3u, convB3d])
        convB3u = Conv2D(256, (3, 3),
                         activation=self.activation,
                         kernel_initializer=self.kernel_init,
                         padding='same')(convB3u)
        convB3u = BatchNormalization()(convB3u)
        convB3u = Conv2D(128, (3, 3),
                         activation=self.activation,
                         kernel_initializer=self.kernel_init,
                         padding='same')(convB3u)
        convB3u = BatchNormalization()(convB3u)
        # Block 2u
        convB2u = Conv2DTranspose(128, (2, 2), strides=(2, 2),
                                  padding='same')(convB3u)
        convB2u = concatenate([convB2u, convB2d])
        convB2u = Conv2D(128, (3, 3),
                         activation=self.activation,
                         kernel_initializer=self.kernel_init,
                         padding='same')(convB2u)
        convB2u = BatchNormalization()(convB2u)
        convB2u = Conv2D(64, (3, 3),
                         activation=self.activation,
                         kernel_initializer=self.kernel_init,
                         padding='same')(convB2u)
        convB2u = BatchNormalization()(convB2u)
        # Block 1u
        convB1u = Conv2DTranspose(64, (2, 2), strides=(2, 2),
                                  padding='same')(convB2u)
        convB1u = concatenate([convB1u, convB1d], axis=3)
        convB1u = Conv2D(64, (3, 3),
                         activation=self.activation,
                         kernel_initializer=self.kernel_init,
                         padding='same')(convB1u)
        convB1u = BatchNormalization()(convB1u)
        convB1u = Conv2D(64, (3, 3),
                         activation=self.activation,
                         kernel_initializer=self.kernel_init,
                         padding='same')(convB1u)
        convB1u = BatchNormalization()(convB1u)

        # ======================================== OUTPUT ==========================================
        if self.n_classes == 2:
            outBlock = Conv2D(1, (1, 1), activation='sigmoid',
                              padding='same')(convB1u)
        else:
            outBlock = Conv2D(self.n_classes, (1, 1),
                              activation='softmax',
                              padding='same')(convB1u)

        # Create model
        model = Model(inputs=inBlock, outputs=outBlock, name=self.model_name)
        # A single sigmoid channel needs binary cross-entropy; softmax outputs use the categorical form
        loss = 'binary_crossentropy' if self.n_classes == 2 else 'categorical_crossentropy'
        model.compile(optimizer=Adam(),
                      loss=loss,
                      metrics=[dice, jaccard])

        # Load models_weights if pre-trained
        if self.pre_trained:
            if os.path.exists(self.weights_path):
                model.load_weights(self.weights_path)
            else:
                raise Exception(
                    f'Failed to load weights at {self.weights_path}')

        return model
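
This and the following examples rely on helpers defined elsewhere in the repository: scale_input (the Lambda preprocessing) plus the dice and jaccard metrics passed to compile (dense_block is sketched after Example #7). A minimal sketch of what they might look like, assuming standard soft-Dice/soft-IoU definitions — these are assumptions, not the repository's actual code:

from tensorflow.keras import backend as K

def scale_input(x):
    # Assumed: map 8-bit pixel values into [0, 1]
    return x / 255.0

def dice(y_true, y_pred, smooth=1.0):
    # Soft Dice coefficient over the flattened masks
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2.0 * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)

def jaccard(y_true, y_pred, smooth=1.0):
    # Soft Jaccard index (IoU); related to Dice by J = D / (2 - D)
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    union = K.sum(y_true_f) + K.sum(y_pred_f) - intersection
    return (intersection + smooth) / (union + smooth)
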
Example #2
    def build(self):
        if self.backbone not in {'xception', 'mobilenetv2'}:
            raise ValueError(
                'The `backbone` argument should be either `xception` or `mobilenetv2`'
            )

        img_input = Input(shape=(self.input_h, self.input_w, 3))
        # Lambda layer: scale input before feeding to the network
        batches_input = Lambda(lambda x: scale_input(x))(img_input)

        if self.backbone == 'xception':
            if self.OS == 8:
                entry_block3_stride = 1
                middle_block_rate = 2  # ! Not mentioned in paper, but required
                exit_block_rates = (2, 4)
                atrous_rates = (12, 24, 36)
            else:
                entry_block3_stride = 2
                middle_block_rate = 1
                exit_block_rates = (1, 2)
                atrous_rates = (6, 12, 18)
            x = Conv2D(32, (3, 3),
                       strides=(2, 2),
                       use_bias=False,
                       padding='same')(batches_input)

            x = BatchNormalization(name='entry_flow_conv1_1_BN')(x)
            x = Activation(self.activation)(x)

            x = self.conv2d_same(x, 64, kernel_size=3, stride=1)
            x = BatchNormalization()(x)
            x = Activation(self.activation)(x)

            x = self.xception_block(x, [128, 128, 128],
                                    skip_connect_type='conv',
                                    stride=2,
                                    depth_activation=False)
            x, skip1 = self.xception_block(x, [256, 256, 256],
                                           skip_connect_type='conv',
                                           stride=2,
                                           depth_activation=False,
                                           return_skip=True)

            x = self.xception_block(x, [728, 728, 728],
                                    skip_connect_type='conv',
                                    stride=entry_block3_stride,
                                    depth_activation=False)
            for i in range(16):
                x = self.xception_block(x, [728, 728, 728],
                                        skip_connect_type='sum',
                                        stride=1,
                                        rate=middle_block_rate,
                                        depth_activation=False)

            x = self.xception_block(x, [728, 1024, 1024],
                                    skip_connect_type='conv',
                                    stride=1,
                                    rate=exit_block_rates[0],
                                    depth_activation=False)
            x = self.xception_block(x, [1536, 1536, 2048],
                                    skip_connect_type='none',
                                    stride=1,
                                    rate=exit_block_rates[1],
                                    depth_activation=True)

        # Backbone='mobilenetv2'
        else:
            self.OS = 8
            first_block_filters = make_divisible(32 * self.alpha, 8)
            x = Conv2D(first_block_filters,
                       kernel_size=3,
                       strides=(2, 2),
                       padding='same',
                       use_bias=False)(batches_input)
            x = BatchNormalization(epsilon=1e-3, momentum=0.999)(x)

            x = Lambda(lambda x: relu(x, max_value=6.))(x)

            x = self.inverted_res_block(x,
                                        filters=16,
                                        alpha=self.alpha,
                                        stride=1,
                                        expansion=1,
                                        block_id=0,
                                        skip_connection=False)

            x = self.inverted_res_block(x,
                                        filters=24,
                                        alpha=self.alpha,
                                        stride=2,
                                        expansion=6,
                                        block_id=1,
                                        skip_connection=False)
            x = self.inverted_res_block(x,
                                        filters=24,
                                        alpha=self.alpha,
                                        stride=1,
                                        expansion=6,
                                        block_id=2,
                                        skip_connection=True)

            x = self.inverted_res_block(x,
                                        filters=32,
                                        alpha=self.alpha,
                                        stride=2,
                                        expansion=6,
                                        block_id=3,
                                        skip_connection=False)
            x = self.inverted_res_block(x,
                                        filters=32,
                                        alpha=self.alpha,
                                        stride=1,
                                        expansion=6,
                                        block_id=4,
                                        skip_connection=True)
            x = self.inverted_res_block(x,
                                        filters=32,
                                        alpha=self.alpha,
                                        stride=1,
                                        expansion=6,
                                        block_id=5,
                                        skip_connection=True)

            # stride in block 6 changed from 2 -> 1, so we need to use rate = 2
            x = self.inverted_res_block(
                x,
                filters=64,
                alpha=self.alpha,
                stride=1,  # 1!
                expansion=6,
                block_id=6,
                skip_connection=False)
            x = self.inverted_res_block(x,
                                        filters=64,
                                        alpha=self.alpha,
                                        stride=1,
                                        rate=2,
                                        expansion=6,
                                        block_id=7,
                                        skip_connection=True)
            x = self.inverted_res_block(x,
                                        filters=64,
                                        alpha=self.alpha,
                                        stride=1,
                                        rate=2,
                                        expansion=6,
                                        block_id=8,
                                        skip_connection=True)
            x = self.inverted_res_block(x,
                                        filters=64,
                                        alpha=self.alpha,
                                        stride=1,
                                        rate=2,
                                        expansion=6,
                                        block_id=9,
                                        skip_connection=True)

            x = self.inverted_res_block(x,
                                        filters=96,
                                        alpha=self.alpha,
                                        stride=1,
                                        rate=2,
                                        expansion=6,
                                        block_id=10,
                                        skip_connection=False)
            x = self.inverted_res_block(x,
                                        filters=96,
                                        alpha=self.alpha,
                                        stride=1,
                                        rate=2,
                                        expansion=6,
                                        block_id=11,
                                        skip_connection=True)
            x = self.inverted_res_block(x,
                                        filters=96,
                                        alpha=self.alpha,
                                        stride=1,
                                        rate=2,
                                        expansion=6,
                                        block_id=12,
                                        skip_connection=True)

            x = self.inverted_res_block(
                x,
                filters=160,
                alpha=self.alpha,
                stride=1,
                rate=2,  # stride stays 1 here, so dilation compensates
                expansion=6,
                block_id=13,
                skip_connection=False)
            x = self.inverted_res_block(x,
                                        filters=160,
                                        alpha=self.alpha,
                                        stride=1,
                                        rate=4,
                                        expansion=6,
                                        block_id=14,
                                        skip_connection=True)
            x = self.inverted_res_block(x,
                                        filters=160,
                                        alpha=self.alpha,
                                        stride=1,
                                        rate=4,
                                        expansion=6,
                                        block_id=15,
                                        skip_connection=True)

            x = self.inverted_res_block(x,
                                        filters=320,
                                        alpha=self.alpha,
                                        stride=1,
                                        rate=4,
                                        expansion=6,
                                        block_id=16,
                                        skip_connection=False)
        # Image Feature branch
        b4 = AveragePooling2D(pool_size=(int(np.ceil(self.input_h / self.OS)),
                                         int(np.ceil(self.input_w /
                                                     self.OS))))(x)

        b4 = Conv2D(256, (1, 1),
                    padding='same',
                    use_bias=False,
                    name='image_pooling')(b4)
        b4 = BatchNormalization(epsilon=1e-5)(b4)
        b4 = Activation(self.activation)(b4)

        b4 = Lambda(lambda x: tf.compat.v1.image.resize_bilinear(
            x,
            size=(int(np.ceil(self.input_h / self.OS)),
                  int(np.ceil(self.input_w / self.OS)))))(b4)

        # simple 1x1
        b0 = Conv2D(256, (1, 1), padding='same', use_bias=False,
                    name='aspp0')(x)
        b0 = BatchNormalization(epsilon=1e-5)(b0)
        b0 = Activation(self.activation)(b0)

        # The mobilenetv2 variant keeps only two ASPP branches (image pooling and the 1x1 conv)
        if self.backbone == 'xception':
            # rate = 6 (12)
            b1 = self.depth_sep_conv(x,
                                     256,
                                     rate=atrous_rates[0],
                                     depth_activation=True,
                                     eps=1e-5)
            # rate = 12 (24)
            b2 = self.depth_sep_conv(x,
                                     256,
                                     rate=atrous_rates[1],
                                     depth_activation=True,
                                     eps=1e-5)
            # rate = 18 (36)
            b3 = self.depth_sep_conv(x,
                                     256,
                                     rate=atrous_rates[2],
                                     depth_activation=True,
                                     eps=1e-5)

            # concatenate ASPP branches & project
            x = Concatenate()([b4, b0, b1, b2, b3])
        else:
            x = Concatenate()([b4, b0])

        x = Conv2D(256, (1, 1), padding='same', use_bias=False)(x)
        x = BatchNormalization(epsilon=1e-5)(x)
        x = Activation(self.activation)(x)
        x = Dropout(0.1)(x)

        # DeepLab v3+ decoder
        if self.backbone == 'xception':
            # Feature projection
            # x4 (x2) block
            x = Lambda(lambda x: tf.compat.v1.image.resize_bilinear(
                x,
                size=(int(np.ceil(self.input_h / 4)),
                      int(np.ceil(self.input_w / 4)))))(x)

            dec_skip1 = Conv2D(48, (1, 1), padding='same',
                               use_bias=False)(skip1)
            dec_skip1 = BatchNormalization(epsilon=1e-5)(dec_skip1)
            dec_skip1 = Activation(self.activation)(dec_skip1)
            x = Concatenate()([x, dec_skip1])
            x = self.depth_sep_conv(x, 256, depth_activation=True, eps=1e-5)
            x = self.depth_sep_conv(x, 256, depth_activation=True, eps=1e-5)

        # Output layer
        x = Conv2D(self.n_classes, (1, 1), padding='same')(x)
        x = Lambda(lambda x: tf.compat.v1.image.resize_bilinear(
            x, size=(self.input_h, self.input_w)))(x)
        x = Activation('softmax')(x)

        # Create model
        model = Model(inputs=img_input, outputs=x)
        model.compile(optimizer=Adam(),
                      loss="categorical_crossentropy",
                      metrics=[
                          dice,
                          jaccard,
                      ])

        # Load models_weights if pre-trained
        if self.pre_trained:
            if os.path.exists(self.weights_path):
                model.load_weights(self.weights_path)
            else:
                raise Exception(f'Failed to load weights at {self.weights_path}')

        return model
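
The mobilenetv2 branch calls make_divisible, which is not shown in this snippet. In the reference MobileNetV2 code it rounds a channel count to the nearest multiple of a divisor without shrinking it by more than 10%; a sketch under that assumption:

def make_divisible(v, divisor, min_value=None):
    # Round v to the nearest multiple of divisor, never below min_value
    if min_value is None:
        min_value = divisor
    new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
    # Ensure rounding down never removes more than 10% of the original value
    if new_v < 0.9 * v:
        new_v += divisor
    return new_v
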
Example #3
    def build(pre_trained=False,
              model_path=None,
              n_classes=None,
              input_h=320,
              input_w=320):
        if pre_trained:
            if os.path.exists(model_path):
                model = load_model(model_path,
                                   custom_objects={
                                       'dice': dice,
                                       'preprocess_input': scale_input
                                   })
                model.summary()
                return model
            else:
                raise Exception(
                    f'Failed to load the existing model at {model_path}')

        # Input
        inBlock = Input(shape=(input_h, input_w, 3), dtype='float32')
        # Lambda layer: scale input before feeding to the network
        inScaled = Lambda(lambda x: scale_input(x))(inBlock)
        # Block 1d
        convB1d = Conv2D(64, (3, 3),
                         activation='elu',
                         kernel_initializer='he_normal',
                         padding='same')(inScaled)
        convB1d = BatchNormalization()(convB1d)
        #convB1d = Dropout(0.1)(convB1d)
        convB1d = Conv2D(64, (3, 3),
                         activation='elu',
                         kernel_initializer='he_normal',
                         padding='same')(convB1d)
        convB1d = BatchNormalization()(convB1d)
        poolB1d = MaxPooling2D(pool_size=(2, 2))(convB1d)
        # Block 2d
        convB2d = Conv2D(128, (3, 3),
                         activation='elu',
                         kernel_initializer='he_normal',
                         padding='same')(poolB1d)
        convB2d = BatchNormalization()(convB2d)
        #convB2d = Dropout(0.1)(convB2d)
        convB2d = Conv2D(128, (3, 3),
                         activation='elu',
                         kernel_initializer='he_normal',
                         padding='same')(convB2d)
        convB2d = BatchNormalization()(convB2d)
        poolB2d = MaxPooling2D(pool_size=(2, 2))(convB2d)
        # Block 3d
        convB3d = Conv2D(256, (3, 3),
                         activation='elu',
                         kernel_initializer='he_normal',
                         padding='same')(poolB2d)
        convB3d = BatchNormalization()(convB3d)
        #convB3d = Dropout(0.2)(convB3d)
        convB3d = Conv2D(256, (3, 3),
                         activation='elu',
                         kernel_initializer='he_normal',
                         padding='same')(convB3d)
        convB3d = BatchNormalization()(convB3d)
        poolB3d = MaxPooling2D(pool_size=(2, 2))(convB3d)
        # Block 4d
        convB4d = Conv2D(512, (3, 3),
                         activation='elu',
                         kernel_initializer='he_normal',
                         padding='same')(poolB3d)
        convB4d = BatchNormalization()(convB4d)
        #convB4d = Dropout(0.2)(convB4d)
        convB4d = Conv2D(512, (3, 3),
                         activation='elu',
                         kernel_initializer='he_normal',
                         padding='same')(convB4d)
        convB4d = BatchNormalization()(convB4d)
        poolB4d = MaxPooling2D(pool_size=(2, 2))(convB4d)
        # Bottleneck
        convBn = Conv2D(1024, (3, 3),
                        activation='elu',
                        kernel_initializer='he_normal',
                        padding='same')(poolB4d)
        convBn = BatchNormalization()(convBn)
        convBn = Conv2D(1024, (3, 3),
                        activation='elu',
                        kernel_initializer='he_normal',
                        padding='same')(convBn)
        convBn = BatchNormalization()(convBn)
        # Block 4u
        convB4u = Conv2DTranspose(512, (2, 2), strides=(2, 2),
                                  padding='same')(convBn)
        convB4u = concatenate([convB4u, convB4d])
        convB4u = Conv2D(512, (3, 3),
                         activation='elu',
                         kernel_initializer='he_normal',
                         padding='same')(convB4u)
        convB4u = BatchNormalization()(convB4u)
        #convB4u = Dropout(0.2)(convB4u)
        convB4u = Conv2D(512, (3, 3),
                         activation='elu',
                         kernel_initializer='he_normal',
                         padding='same')(convB4u)
        convB4u = BatchNormalization()(convB4u)
        # Block 3u
        convB3u = Conv2DTranspose(256, (2, 2), strides=(2, 2),
                                  padding='same')(convB4u)
        convB3u = concatenate([convB3u, convB3d])
        convB3u = Conv2D(256, (3, 3),
                         activation='elu',
                         kernel_initializer='he_normal',
                         padding='same')(convB3u)
        convB3u = BatchNormalization()(convB3u)
        #convB3u = Dropout(0.2)(convB3u)
        convB3u = Conv2D(256, (3, 3),
                         activation='elu',
                         kernel_initializer='he_normal',
                         padding='same')(convB3u)
        convB3u = BatchNormalization()(convB3u)
        # Block 2u
        convB2u = Conv2DTranspose(128, (2, 2), strides=(2, 2),
                                  padding='same')(convB3u)
        convB2u = concatenate([convB2u, convB2d])
        convB2u = Conv2D(128, (3, 3),
                         activation='elu',
                         kernel_initializer='he_normal',
                         padding='same')(convB2u)
        convB2u = BatchNormalization()(convB2u)
        #convB2u = Dropout(0.1)(convB2u)
        convB2u = Conv2D(128, (3, 3),
                         activation='elu',
                         kernel_initializer='he_normal',
                         padding='same')(convB2u)
        convB2u = BatchNormalization()(convB2u)
        # Block 1u
        convB1u = Conv2DTranspose(64, (2, 2), strides=(2, 2),
                                  padding='same')(convB2u)
        convB1u = concatenate([convB1u, convB1d], axis=3)
        convB1u = Conv2D(64, (3, 3),
                         activation='elu',
                         kernel_initializer='he_normal',
                         padding='same')(convB1u)
        convB1u = BatchNormalization()(convB1u)
        #convB1u = Dropout(0.1)(convB1u)
        convB1u = Conv2D(64, (3, 3),
                         activation='elu',
                         kernel_initializer='he_normal',
                         padding='same')(convB1u)
        convB1u = BatchNormalization()(convB1u)

        # Output layer
        if n_classes == 2:
            outBlock = Conv2D(1, (1, 1), activation='sigmoid',
                              padding='same')(convB1u)
        else:
            outBlock = Conv2D(n_classes, (1, 1),
                              activation='softmax',
                              padding='same')(convB1u)

        # Create model
        model = Model(inputs=inBlock, outputs=outBlock, name="unet_model")
        loss = 'binary_crossentropy' if n_classes == 2 else 'categorical_crossentropy'
        model.compile(optimizer=Adam(1e-3),
                      loss=loss,
                      metrics=[dice])
        model.summary()

        return model
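
Unlike the other examples, this variant takes plain keyword arguments and can either restore a saved model or compile a fresh one. A possible call site (the module name and checkpoint path below are hypothetical):

from unet import build  # hypothetical module name

# Train a fresh binary-segmentation model from scratch
model = build(n_classes=2, input_h=320, input_w=320)

# Or restore a previously saved model from disk
model = build(pre_trained=True, model_path='checkpoints/unet.h5', n_classes=2)
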
Example #4
    def build(self):
        # ======================================== INPUT ==========================================
        inBlock = Input(shape=(self.input_h, self.input_w, 3), dtype='float32')
        # Lambda layer: scale input before feeding to the network
        inScaled = Lambda(lambda x: scale_input(x))(inBlock)

        # ======================================== ENCODER ========================================
        # Block 1d
        convB1d = Conv2D(64, (3, 3),
                         kernel_initializer=self.kernel_init,
                         padding='same')(inScaled)
        convB1d = BatchNormalization()(convB1d)
        convB1d = Activation(self.activation)(convB1d)
        poolB1d = MaxPooling2D(pool_size=(2, 2))(convB1d)
        # Block 2d
        convB2d = Conv2D(128, (3, 3),
                         kernel_initializer=self.kernel_init,
                         padding='same')(poolB1d)
        convB2d = BatchNormalization()(convB2d)
        convB2d = Activation(self.activation)(convB2d)
        poolB2d = MaxPooling2D(pool_size=(2, 2))(convB2d)
        # Block 3d
        convB3d = Conv2D(256, (3, 3),
                         kernel_initializer=self.kernel_init,
                         padding='same')(poolB2d)
        convB3d = BatchNormalization()(convB3d)
        convB3d = Activation(self.activation)(convB3d)
        poolB3d = MaxPooling2D(pool_size=(2, 2))(convB3d)
        # Block 4d
        convB4d = Conv2D(512, (3, 3),
                         kernel_initializer=self.kernel_init,
                         padding='same')(poolB3d)
        convB4d = BatchNormalization()(convB4d)
        convB4d = Activation(self.activation)(convB4d)

        # ====================================== DECODER =======================================
        # Block 4u
        convB4u = Conv2D(512, (3, 3),
                         kernel_initializer=self.kernel_init,
                         padding='same')(convB4d)
        convB4u = BatchNormalization()(convB4u)
        convB4u = Activation(self.activation)(convB4u)
        # Block 3u
        convB3u = UpSampling2D(size=(2, 2))(convB4u)
        convB3u = Conv2D(256, (3, 3),
                         kernel_initializer=self.kernel_init,
                         padding='same')(convB3u)
        convB3u = BatchNormalization()(convB3u)
        convB3u = Activation(self.activation)(convB3u)
        # Block 2u
        convB2u = UpSampling2D(size=(2, 2))(convB3u)
        convB2u = Conv2D(128, (3, 3),
                         kernel_initializer=self.kernel_init,
                         padding='same')(convB2u)
        convB2u = BatchNormalization()(convB2u)
        convB2u = Activation(self.activation)(convB2u)
        # Block 1u
        convB1u = UpSampling2D(size=(2, 2))(convB2u)
        convB1u = Conv2D(64, (3, 3),
                         kernel_initializer=self.kernel_init,
                         padding='same')(convB1u)
        convB1u = BatchNormalization()(convB1u)
        convB1u = Activation(self.activation)(convB1u)

        # ====================================== OUTPUT =======================================
        if self.n_classes == 2:
            outBlock = Conv2D(1, (1, 1), activation='sigmoid',
                              padding='same')(convB1u)
        else:
            outBlock = Conv2D(self.n_classes, (1, 1),
                              activation='softmax',
                              padding='same')(convB1u)

        # Create model
        model = Model(inputs=inBlock, outputs=outBlock, name=self.model_name)
        loss = 'binary_crossentropy' if self.n_classes == 2 else 'categorical_crossentropy'
        model.compile(optimizer=Adam(),
                      loss=loss,
                      metrics=[dice, jaccard])

        # Load models_weights if pre-trained
        if self.pre_trained:
            if os.path.exists(self.weights_path):
                model.load_weights(self.weights_path)
            else:
                raise Exception(
                    f'Failed to load weights at {self.weights_path}')

        return model
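
This decoder upsamples with UpSampling2D followed by a 3x3 convolution, whereas the U-Net examples above use Conv2DTranspose. The two forms are roughly interchangeable; both double the spatial resolution, they just split the work differently:

# Learned upsampling in one layer (as in Examples #1 and #3)
x = Conv2DTranspose(256, (2, 2), strides=(2, 2), padding='same')(x)

# Fixed nearest-neighbour upsampling plus a learned 3x3 conv (as in this example)
x = UpSampling2D(size=(2, 2))(x)
x = Conv2D(256, (3, 3), padding='same')(x)
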
Example #5
    def build(self):
        # Input
        inBlock = Input(shape=(self.input_h, self.input_w, 3), dtype='float32')
        # Lambda layer: scale input before feeding to the network
        inScaled = Lambda(lambda x: scale_input(x))(inBlock)
        # =============================================== ENCODING ==================================================
        # Block 1d
        convB1d = Conv2D(64, (3, 3),
                         activation=self.activation,
                         kernel_initializer=self.kernel_init,
                         padding='same')(inScaled)
        convB1d = Conv2D(64, (3, 3),
                         activation=self.activation,
                         kernel_initializer=self.kernel_init,
                         padding='same')(convB1d)
        poolB1d = MaxPooling2D(pool_size=(2, 2))(convB1d)
        # Block 2d
        convB2d = Conv2D(128, (3, 3),
                         activation=self.activation,
                         kernel_initializer=self.kernel_init,
                         padding='same')(poolB1d)
        convB2d = Conv2D(128, (3, 3),
                         activation=self.activation,
                         kernel_initializer=self.kernel_init,
                         padding='same')(convB2d)
        poolB2d = MaxPooling2D(pool_size=(2, 2))(convB2d)
        # Block 3d
        convB3d = Conv2D(256, (3, 3),
                         activation=self.activation,
                         kernel_initializer=self.kernel_init,
                         padding='same')(poolB2d)
        convB3d = Conv2D(256, (3, 3),
                         activation=self.activation,
                         kernel_initializer=self.kernel_init,
                         padding='same')(convB3d)
        dropB3d = Dropout(0.5)(convB3d)
        poolB3d = MaxPooling2D(pool_size=(2, 2))(dropB3d)

        # =============================================== BOTTLENECK =================================================
        # Bottleneck - Block D1
        convBnd1 = Conv2D(512, (3, 3),
                          activation=self.activation,
                          kernel_initializer=self.kernel_init,
                          padding='same')(poolB3d)
        convBnd1 = Conv2D(512, (3, 3),
                          activation=self.activation,
                          kernel_initializer=self.kernel_init,
                          padding='same')(convBnd1)
        dropBnd1 = Dropout(0.5)(convBnd1)

        # =============================================== DECODING ==================================================
        # Block 4u
        convB4u = Conv2DTranspose(256,
                                  kernel_size=2,
                                  strides=2,
                                  kernel_initializer=self.kernel_init,
                                  padding='same')(dropBnd1)
        convB4u = BatchNormalization(axis=3)(convB4u)
        convB4u = Activation(self.activation)(convB4u)
        # Add a time axis of length 1 so the skip tensor can be stacked with
        # the upsampled tensor along axis=1 and fed to ConvLSTM2D
        dropB3d = Reshape(target_shape=(
            1, np.int32(self.input_h / 4), np.int32(self.input_w / 4), 256
        ))(dropB3d)
        convB4u = Reshape(target_shape=(1, np.int32(self.input_h / 4),
                                        np.int32(self.input_w / 4),
                                        256))(convB4u)
        merge_3d_4u = concatenate([dropB3d, convB4u], axis=1)
        merge_3d_4u = ConvLSTM2D(
            128,
            kernel_size=3,
            padding='same',
            return_sequences=False,
            go_backwards=True,
            kernel_initializer=self.kernel_init)(merge_3d_4u)
        convB4u = Conv2D(256, (3, 3),
                         activation=self.activation,
                         kernel_initializer=self.kernel_init,
                         padding='same')(merge_3d_4u)
        convB4u = Conv2D(256, (3, 3),
                         activation=self.activation,
                         kernel_initializer=self.kernel_init,
                         padding='same')(convB4u)

        # Block 3u
        convB3u = Conv2DTranspose(128, (2, 2),
                                  strides=(2, 2),
                                  kernel_initializer=self.kernel_init,
                                  padding='same')(convB4u)
        convB3u = BatchNormalization(axis=3)(convB3u)
        convB3u = Activation(self.activation)(convB3u)
        convB2d = Reshape(target_shape=(1, np.int32(self.input_h / 2),
                                        np.int32(self.input_w / 2),
                                        128))(convB2d)
        convB3u = Reshape(target_shape=(1, np.int32(self.input_h / 2),
                                        np.int32(self.input_w / 2),
                                        128))(convB3u)
        merge_2d_3u = concatenate([convB2d, convB3u], axis=1)
        merge_2d_3u = ConvLSTM2D(
            128,
            kernel_size=3,
            padding='same',
            return_sequences=False,
            go_backwards=True,
            kernel_initializer=self.kernel_init)(merge_2d_3u)
        convB3u = Conv2D(128, (3, 3),
                         activation=self.activation,
                         kernel_initializer=self.kernel_init,
                         padding='same')(merge_2d_3u)
        convB3u = Conv2D(128, (3, 3),
                         activation=self.activation,
                         kernel_initializer=self.kernel_init,
                         padding='same')(convB3u)

        # Block 2u
        convB2u = Conv2DTranspose(64, (2, 2),
                                  strides=(2, 2),
                                  kernel_initializer=self.kernel_init,
                                  padding='same')(convB3u)
        convB2u = BatchNormalization(axis=3)(convB2u)
        convB2u = Activation(self.activation)(convB2u)
        convB1d = Reshape(target_shape=(1, self.input_h, self.input_w,
                                        64))(convB1d)
        convB2u = Reshape(target_shape=(1, self.input_h, self.input_w,
                                        64))(convB2u)
        merge_1d_2u = concatenate([convB1d, convB2u], axis=1)
        merge_1d_2u = ConvLSTM2D(
            128,
            kernel_size=3,
            padding='same',
            return_sequences=False,
            go_backwards=True,
            kernel_initializer=self.kernel_init)(merge_1d_2u)
        convB2u = Conv2D(64, (3, 3),
                         activation=self.activation,
                         kernel_initializer=self.kernel_init,
                         padding='same')(merge_1d_2u)
        convB2u = Conv2D(64, (3, 3),
                         activation=self.activation,
                         kernel_initializer=self.kernel_init,
                         padding='same')(convB2u)

        # ================================================== OUTPUT =====================================================
        # Output layer
        if self.n_classes == 2:
            outBlock = Conv2D(1, (1, 1), activation='sigmoid',
                              padding='same')(convB2u)
        else:
            outBlock = Conv2D(self.n_classes, (1, 1),
                              activation='softmax',
                              padding='same')(convB2u)

        # Create model
        model = Model(inputs=inBlock, outputs=outBlock, name=self.model_name)
        loss = 'binary_crossentropy' if self.n_classes == 2 else 'categorical_crossentropy'
        model.compile(optimizer=Adam(),
                      loss=loss,
                      metrics=[dice, jaccard])

        # Load models_weights if pre-trained
        if self.pre_trained:
            if os.path.exists(self.weights_path):
                model.load_weights(self.weights_path)
            else:
                raise Exception(
                    f'Failed to load weights at {self.weights_path}')

        return model
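
The distinctive piece of this decoder is the skip fusion: instead of concatenating along the channel axis, the skip tensor and the upsampled tensor each get a time axis of length 1, are stacked along it, and the resulting length-2 sequence is run through a ConvLSTM2D. A standalone sketch of the pattern (shapes are illustrative only):

from tensorflow.keras.layers import Input, Reshape, concatenate, ConvLSTM2D

skip = Input(shape=(80, 80, 256))          # encoder feature map
up = Input(shape=(80, 80, 256))            # upsampled decoder feature map
skip_t = Reshape((1, 80, 80, 256))(skip)   # add a time axis
up_t = Reshape((1, 80, 80, 256))(up)
seq = concatenate([skip_t, up_t], axis=1)  # sequence of length 2
fused = ConvLSTM2D(128, kernel_size=3, padding='same',
                   return_sequences=False,
                   go_backwards=True)(seq)  # -> (batch, 80, 80, 128)
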
Example #6
    def build(self):
        # ======================================== INPUT ==========================================
        inBlock = Input(shape=(self.input_h, self.input_w, 3), dtype='float32')
        # Lambda layer: scale input before feeding to the network
        inScaled = Lambda(lambda x: scale_input(x))(inBlock)

        # ======================================== ENCODER ========================================
        # Input Block
        inConvB = Conv2D(32, (3, 3), padding='same')(inScaled)
        inConvB = BatchNormalization()(inConvB)
        inConvB = ReLU()(inConvB)

        # Block 1
        convB1d = self.depthwise_block(inConvB, 64)
        convB1d = self.depthwise_block(convB1d, 64)
        poolB1d = MaxPooling2D(pool_size=(2, 2))(convB1d)
        # Block 2
        convB2d = self.depthwise_block(poolB1d, 128)
        convB2d = self.depthwise_block(convB2d, 128)
        poolB2d = MaxPooling2D(pool_size=(2, 2))(convB2d)
        # Block 3
        convB3d = self.depthwise_block(poolB2d, 256)
        convB3d = self.depthwise_block(convB3d, 256)
        poolB3d = MaxPooling2D(pool_size=(2, 2))(convB3d)
        # Block 4
        convB4d = self.depthwise_block(poolB3d, 512)
        convB4d = self.depthwise_block(convB4d, 512)
        poolB4d = MaxPooling2D(pool_size=(2, 2))(convB4d)

        # ===================================== BOTTLENECK ======================================
        convBn = self.depthwise_block(poolB4d, 1024)
        convBn = self.depthwise_block(convBn, 512)

        # ====================================== DECODER =======================================
        # Block 4u
        convB4u = Conv2DTranspose(512, (2, 2), strides=(2, 2), padding='same')(convBn)
        convB4u = concatenate([convB4u, convB4d])
        convB4u = self.depthwise_block(convB4u, 512)
        convB4u = self.depthwise_block(convB4u, 256)
        # Block 3u
        convB3u = Conv2DTranspose(256, (2, 2), strides=(2, 2), padding='same')(convB4u)
        convB3u = concatenate([convB3u, convB3d])
        convB3u = self.depthwise_block(convB3u, 256)
        convB3u = self.depthwise_block(convB3u, 128)
        # Block 2u
        convB2u = Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same')(convB3u)
        convB2u = concatenate([convB2u, convB2d])
        convB2u = self.depthwise_block(convB2u, 128)
        convB2u = self.depthwise_block(convB2u, 64)
        # Block 1u
        convB1u = Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same')(convB2u)
        convB1u = concatenate([convB1u, convB1d], axis=3)
        convB1u = self.depthwise_block(convB1u, 64)
        convB1u = self.depthwise_block(convB1u, 64)

        # ======================================== OUTPUT ==========================================
        if self.n_classes == 2:
            outBlock = Conv2D(1, (1, 1), activation='sigmoid', padding='same')(convB1u)
        else:
            outBlock = Conv2D(self.n_classes, (1, 1), activation='softmax', padding='same')(convB1u)

        # Create model
        model = Model(inputs=inBlock, outputs=outBlock, name=self.model_name)
        loss = 'binary_crossentropy' if self.n_classes == 2 else 'categorical_crossentropy'
        model.compile(optimizer=Adam(),
                      loss=loss,
                      metrics=[dice, jaccard])

        # Load models_weights if pre-trained
        if self.pre_trained:
            if os.path.exists(self.weights_path):
                model.load_weights(self.weights_path)
            else:
                raise Exception(f'Failed to load weights at {self.weights_path}')

        return model
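
depthwise_block is defined elsewhere in the class. Given the MobileNet-flavoured design, a plausible sketch (an assumption, not the author's code) is a depthwise separable convolution with batch normalization and ReLU:

from tensorflow.keras.layers import DepthwiseConv2D, Conv2D, BatchNormalization, ReLU

def depthwise_block(self, x, filters):
    # 3x3 depthwise convolution, then a 1x1 pointwise projection to `filters`
    x = DepthwiseConv2D((3, 3), padding='same')(x)
    x = BatchNormalization()(x)
    x = ReLU()(x)
    x = Conv2D(filters, (1, 1), padding='same')(x)
    x = BatchNormalization()(x)
    return ReLU()(x)
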
Example #7
    def build(self):
        # ======================================== INPUT ==========================================
        input_layer = Input(shape=(self.input_h, self.input_w, 3),
                            dtype='float32')
        # Lambda layer: scale input before feeding to the network
        inScaled = Lambda(lambda x: scale_input(x))(input_layer)
        # First block
        stack = Conv2D(self.input_filters,
                       kernel_size=(3, 3),
                       strides=(1, 1),
                       padding='same',
                       data_format='channels_last')(inScaled)

        # ======================================== ENCODER ========================================
        # Save the output of each dense block in the down-sampling path, to be concatenated at the matching transition-up
        skip_connections = list()
        for i in range(self.n_pool):
            # DB: Dense-Block
            for j in range(self.n_layers_per_dense_block[i]):
                l = self.dense_block(stack, self.growth_rate)
                stack = concatenate([stack, l])
                # Update the running filter count
                self.input_filters += self.growth_rate
            # Save the current output for the skip connection
            skip_connections.append(stack)
            # TD: Transition-Down
            stack = self.transition_down(stack, self.input_filters)

        # ===================================== BOTTLENECK ======================================
        # Store the outputs of the bottleneck's dense-block layers; they are upsampled together
        block_to_upsample = []
        # Create the bottleneck dense block
        for i in range(self.n_layers_per_dense_block[self.n_pool]):
            # DB: Dense-Block
            l = self.dense_block(stack, self.growth_rate)
            block_to_upsample.append(l)
            stack = concatenate([stack, l])

        # ====================================== DECODER =======================================
        # Reverse the skip connections so the deepest one (the last pooling layer) sits at index 0
        skip_connections = skip_connections[::-1]

        for j in range(self.n_pool):
            # Updating filters is specific to each variant
            if self.model_name in ('fcn_densenet_56', 'fcn_densenet_67'):
                # Uniform variants: every dense block has the same depth
                keep_filters = self.n_layers_per_dense_block[0] * self.growth_rate
            else:
                keep_filters = self.n_layers_per_dense_block[
                    self.n_pool + j] * self.growth_rate
            # TU: Transition-Up
            stack = self.transition_up(skip_connections[j], block_to_upsample,
                                       keep_filters)
            # Dense Block
            block_to_upsample = []
            for k in range(self.n_layers_per_dense_block[self.n_pool + j + 1]):
                l = self.dense_block(stack, self.growth_rate)
                block_to_upsample.append(l)
                stack = concatenate([stack, l])

        # ======================================== OUTPUT ==========================================
        if self.n_classes == 2:
            stack = Conv2D(1, (1, 1),
                           activation='sigmoid',
                           padding='same',
                           data_format='channels_last')(stack)
        else:
            stack = Conv2D(self.n_classes, (1, 1),
                           activation='softmax',
                           padding='same',
                           data_format='channels_last')(stack)

        # Compile model
        model = Model(inputs=input_layer, outputs=stack, name=self.model_name)
        loss = 'binary_crossentropy' if self.n_classes == 2 else 'categorical_crossentropy'
        model.compile(optimizer=Adam(),
                      loss=loss,
                      metrics=[dice, jaccard])

        # Load models_weights if pre-trained
        if self.pre_trained:
            if os.path.exists(self.weights_path):
                model.load_weights(self.weights_path)
            else:
                raise Exception(
                    f'Failed to load weights at {self.weights_path}')

        return model
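
dense_block, transition_down and transition_up are not shown in this snippet. Following the FC-DenseNet ("Tiramisu") paper, plausible sketches look like the following — these are assumptions about the helpers, not the repository's definitions:

from tensorflow.keras.layers import (Activation, BatchNormalization, Conv2D,
                                     Conv2DTranspose, Dropout, MaxPooling2D,
                                     concatenate)

def dense_block(self, x, growth_rate):
    # One dense layer: BN -> ReLU -> 3x3 conv producing `growth_rate` feature maps
    l = BatchNormalization()(x)
    l = Activation('relu')(l)
    l = Conv2D(growth_rate, (3, 3), padding='same')(l)
    return Dropout(0.2)(l)

def transition_down(self, x, filters):
    # BN -> ReLU -> 1x1 conv -> 2x2 max-pool, halving the resolution
    l = BatchNormalization()(x)
    l = Activation('relu')(l)
    l = Conv2D(filters, (1, 1), padding='same')(l)
    l = Dropout(0.2)(l)
    return MaxPooling2D(pool_size=(2, 2))(l)

def transition_up(self, skip, block_to_upsample, filters):
    # Upsample the dense-block outputs, then attach the skip connection
    l = concatenate(block_to_upsample) if len(block_to_upsample) > 1 else block_to_upsample[0]
    l = Conv2DTranspose(filters, (3, 3), strides=(2, 2), padding='same')(l)
    return concatenate([l, skip])
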
Example #8
    def build(self):
        inBlock = Input(shape=(self.input_h, self.input_w, 3), dtype='float32')
        # Lambda layer: scale input before feeding to the network
        inScaled = Lambda(lambda x: scale_input(x))(inBlock)

        # Block 1
        x = Conv2D(64, (3, 3), activation=self.activation, kernel_initializer=self.kernel_init, padding='same')(inScaled)
        x = Conv2D(64, (3, 3), activation=self.activation, kernel_initializer=self.kernel_init, padding='same')(x)
        x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(x)
        f1 = x

        # Block 2
        x = Conv2D(128, (3, 3), activation=self.activation, kernel_initializer=self.kernel_init, padding='same')(x)
        x = Conv2D(128, (3, 3), activation=self.activation, kernel_initializer=self.kernel_init, padding='same')(x)
        x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(x)
        f2 = x

        # Block 3
        x = Conv2D(256, (3, 3), activation=self.activation, kernel_initializer=self.kernel_init, padding='same')(x)
        x = Conv2D(256, (3, 3), activation=self.activation, kernel_initializer=self.kernel_init, padding='same')(x)
        x = Conv2D(256, (3, 3), activation=self.activation, kernel_initializer=self.kernel_init, padding='same')(x)
        x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(x)
        pool3 = x

        # Block 4
        x = Conv2D(256, (3, 3), activation=self.activation, kernel_initializer=self.kernel_init, padding='same')(pool3)
        x = Conv2D(256, (3, 3), activation=self.activation, kernel_initializer=self.kernel_init, padding='same')(x)
        x = Conv2D(256, (3, 3), activation=self.activation, kernel_initializer=self.kernel_init, padding='same')(x)
        x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(x)
        pool4 = x

        # Block 5
        x = Conv2D(512, (3, 3), activation=self.activation, kernel_initializer=self.kernel_init, padding='same')(pool4)
        x = Conv2D(512, (3, 3), activation=self.activation, kernel_initializer=self.kernel_init, padding='same')(x)
        x = Conv2D(512, (3, 3), activation=self.activation, kernel_initializer=self.kernel_init, padding='same')(x)
        x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(x)
        pool5 = x

        conv6 = Conv2D(2048, (7, 7), activation=self.activation, kernel_initializer=self.kernel_init, padding='same')(pool5)
        conv7 = Conv2D(2048, (1, 1), activation=self.activation, kernel_initializer=self.kernel_init, padding='same')(conv6)

        pool4_n = Conv2D(self.n_classes, (1, 1), activation=self.activation, kernel_initializer=self.kernel_init, padding='same')(pool4)
        u2 = Conv2DTranspose(self.n_classes, kernel_size=(2, 2), strides=(2, 2), activation=self.activation, kernel_initializer=self.kernel_init, padding='same')(conv7)
        # Skip connection between pool4 (after a 1x1 convolution) and conv7 upsampled 2x
        u2_skip = Add()([pool4_n, u2])

        pool3_n = Conv2D(self.n_classes, (1, 1), activation=self.activation, kernel_initializer=self.kernel_init, padding='same')(pool3)
        u4 = Conv2DTranspose(self.n_classes, kernel_size=(2, 2), strides=(2, 2), activation=self.activation, kernel_initializer=self.kernel_init, padding='same')(u2_skip)
        # Skip connection between pool3 (after a 1x1 convolution) and the previous sum upsampled 2x again (4x total)
        u4_skip = Add()([pool3_n, u4])

        # Output layer
        outBlock = Conv2DTranspose(self.n_classes, kernel_size=(8, 8), strides=(8, 8), padding='same', activation='softmax')(u4_skip)

        # Create model
        model = Model(inputs=inBlock, outputs=outBlock, name=self.model_name)
        model.compile(optimizer=Adam(),
                      loss="categorical_crossentropy",
                      metrics=[dice, jaccard, ]
                      )

        # Load models_weights if pre-trained
        if self.pre_trained:
            if os.path.exists(self.weights_path):
                model.load_weights(self.weights_path)
            else:
                raise Exception(f'Failed to load weights at {self.weights_path}')

        return model
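
All eight builders end the same way, so any of them can be driven by the same training loop. A sketch of typical usage, assuming a builder class FCN8 with the attributes referenced above (the class name and data variables are hypothetical):

net = FCN8(input_h=320, input_w=320, n_classes=5,
           activation='relu', kernel_init='he_normal',
           model_name='fcn8', pre_trained=False, weights_path=None)
model = net.build()
model.fit(train_images, train_masks,
          validation_data=(val_images, val_masks),
          batch_size=8, epochs=50)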