Example 1
    def _aspp(self, x, out_filters):
        xs = list()

        # 1x1 convolution branch
        x1 = layers.Conv2D(out_filters,
                           1,
                           strides=1,
                           kernel_initializer='he_normal')(x)
        xs.append(x1)

        # three 3x3 atrous branches with dilation rates 6, 12 and 18
        for i in range(3):
            xi = layers.Conv2D(out_filters,
                               3,
                               strides=1,
                               padding='same',
                               dilation_rate=6 * (i + 1))(x)
            xs.append(xi)

        # image-level pooling branch, upsampled back to the ASPP feature size
        img_pool = custom_layers.GlobalAveragePooling2D(keep_dims=True)(x)
        img_pool = layers.Conv2D(out_filters,
                                 1,
                                 1,
                                 kernel_initializer='he_normal')(img_pool)
        img_pool = layers.UpSampling2D(size=self.aspp_size,
                                       interpolation='bilinear')(img_pool)
        xs.append(img_pool)

        # concatenate all branches and project them with a 1x1 convolution
        x = custom_layers.Concatenate(out_size=self.aspp_size)(xs)
        x = layers.Conv2D(out_filters,
                          1,
                          strides=1,
                          kernel_initializer='he_normal')(x)
        x = layers.BatchNormalization()(x)

        return x
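
Note: every example on this page pools with custom_layers.GlobalAveragePooling2D(keep_dims=True) instead of the stock Keras layer, so the pooled tensor keeps 1x1 spatial dimensions and can later be broadcast or upsampled. The layer itself is not reproduced here; a minimal sketch of what it presumably does, assuming it simply averages over the spatial axes (the real definition lives in the repository's custom_layers module):

import tensorflow as tf
from tensorflow.keras import layers


class GlobalAveragePooling2D(layers.Layer):
    # Hypothetical re-implementation: average over height and width while
    # keeping them as size-1 dimensions, i.e. (N, H, W, C) -> (N, 1, 1, C).
    def __init__(self, keep_dims=True, **kwargs):
        super().__init__(**kwargs)
        self.keep_dims = keep_dims

    def call(self, inputs):
        return tf.reduce_mean(inputs, axis=[1, 2], keepdims=self.keep_dims)
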
Example 2
    def _attention_refinement_module(self, x):
        # Global average pooling
        _, _, _, c = backend.int_shape(x)

        glb = custom_layers.GlobalAveragePooling2D(keep_dims=True)(x)
        glb = layers.Conv2D(c, 1, strides=1, kernel_initializer='he_normal')(glb)
        glb = layers.BatchNormalization()(glb)
        glb = layers.Activation(activation='sigmoid')(glb)

        # gate the input feature map channel-wise with the sigmoid weights
        x = layers.Multiply()([x, glb])

        return x
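
The Multiply above works because Keras merge layers broadcast size-1 dimensions: the pooled (N, 1, 1, C) gate is expanded against the full (N, H, W, C) feature map. A quick standalone check of that behaviour (the shapes below are arbitrary examples, not taken from the model):

import tensorflow as tf
from tensorflow.keras import layers

feat = tf.random.normal((2, 32, 32, 64))   # stand-in feature map
gate = tf.random.normal((2, 1, 1, 64))     # stand-in (N, 1, 1, C) channel gate
out = layers.Multiply()([feat, gate])      # broadcasts over H and W
print(out.shape)                           # (2, 32, 32, 64)
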
Example 3
    def _feature_fusion_module(self, input_1, input_2, filters):
        inputs = layers.Concatenate()([input_1, input_2])
        inputs = self._conv_block(inputs, filters=filters, kernel_size=3)

        # Global average pooling
        _, _, _, c = backend.int_shape(inputs)

        glb = custom_layers.GlobalAveragePooling2D(keep_dims=True)(inputs)
        glb = layers.Conv2D(filters, 1, strides=1, activation='relu', kernel_initializer='he_normal')(glb)
        glb = layers.Conv2D(filters, 1, strides=1, activation='sigmoid', kernel_initializer='he_normal')(glb)

        # reweight the fused features with the learned channel attention
        x = layers.Multiply()([inputs, glb])

        return x
Example 4
    def _gau(self, x, y, out_filters, up_size=(2, 2)):
        # channel attention weights taken from the high-level feature map y
        glb = custom_layers.GlobalAveragePooling2D(keep_dims=True)(y)
        glb = layers.Conv2D(out_filters,
                            1,
                            strides=1,
                            activation='sigmoid',
                            kernel_initializer='he_normal')(glb)

        # refine the low-level feature map and gate it with the global context of y
        x = self._conv_bn_relu(x, out_filters, 3, 1)
        x = layers.Multiply()([x, glb])

        # upsample the high-level feature map and fuse the two by addition
        y = layers.UpSampling2D(size=up_size, interpolation='bilinear')(y)

        y = layers.Add()([x, y])

        return y
Example 5
    def _fpa(self, x, out_filters):
        _, h, w, _ = backend.int_shape(x)

        # global pooling branch: 1x1 context, projected to out_filters channels
        glb = custom_layers.GlobalAveragePooling2D(keep_dims=True)(x)
        glb = layers.Conv2D(out_filters,
                            1,
                            strides=1,
                            kernel_initializer='he_normal')(glb)

        # downsampling pyramid: halve the resolution three times, with 7x7, 5x5 and 3x3 convolutions
        down1 = layers.AveragePooling2D(pool_size=(2, 2))(x)
        down1 = self._conv_bn_relu(down1, 1, 7, 1)

        down2 = layers.AveragePooling2D(pool_size=(2, 2))(down1)
        down2 = self._conv_bn_relu(down2, 1, 5, 1)

        down3 = layers.AveragePooling2D(pool_size=(2, 2))(down2)
        down3 = self._conv_bn_relu(down3, 1, 3, 1)

        down1 = self._conv_bn_relu(down1, 1, 7, 1)
        down2 = self._conv_bn_relu(down2, 1, 5, 1)
        down3 = self._conv_bn_relu(down3, 1, 3, 1)

        # upsampling path: merge the pyramid levels back up by addition
        up2 = layers.UpSampling2D(size=(2, 2))(down3)
        up2 = layers.Add()([up2, down2])

        up1 = layers.UpSampling2D(size=(2, 2))(up2)
        up1 = layers.Add()([up1, down1])

        up = layers.UpSampling2D(size=(2, 2))(up1)

        x = layers.Conv2D(out_filters,
                          1,
                          strides=1,
                          kernel_initializer='he_normal')(x)
        x = layers.BatchNormalization()(x)

        # modulate the 1x1-projected input with the pyramid attention map
        x = layers.Multiply()([x, up])

        # add the global pooling branch (broadcast over the spatial dimensions)
        x = layers.Add()([x, glb])

        return x
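
Examples 4 and 5 call a _conv_bn_relu helper (and Examples 3 and 6 a similar _conv_block) that is not reproduced on this page. A plausible sketch, assuming the usual convolution / batch-normalization / ReLU composition and the (x, filters, kernel_size, strides) argument order seen in the calls above:

from tensorflow.keras import layers


def _conv_bn_relu(self, x, filters, kernel_size, strides):
    # Assumed composition for the repository's helper: a 'same'-padded
    # convolution followed by batch norm and ReLU, so the spatial size only
    # changes through the stride.
    x = layers.Conv2D(filters,
                      kernel_size,
                      strides=strides,
                      padding='same',
                      kernel_initializer='he_normal')(x)
    x = layers.BatchNormalization()(x)
    x = layers.ReLU()(x)
    return x
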
Example 6
    def _bisegnet(self, inputs):
        num_classes = self.num_classes

        # the spatial path
        sx = self._conv_block(inputs, 64, 3, 2)
        sx = self._conv_block(sx, 128, 3, 2)
        sx = self._conv_block(sx, 256, 3, 2)

        # the context path
        if self.base_model in ['VGG16',
                               'VGG19',
                               'ResNet50',
                               'ResNet101',
                               'ResNet152',
                               'MobileNetV1',
                               'MobileNetV2',
                               'Xception',
                               'Xception-DeepLab']:
            c4, c5 = self.encoder(inputs, output_stages=['c4', 'c5'])
        else:
            c4, c5 = self.encoder(inputs, output_stages=['c3', 'c5'])

        c4 = self._attention_refinement_module(c4)
        c5 = self._attention_refinement_module(c5)

        # global context tail: scale the deepest stage by its own global average
        glb = custom_layers.GlobalAveragePooling2D(keep_dims=True)(c5)
        c5 = layers.Multiply()([c5, glb])

        # combining the paths
        c4 = layers.UpSampling2D(size=(2, 2), interpolation='bilinear')(c4)
        c5 = layers.UpSampling2D(size=(4, 4), interpolation='bilinear')(c5)

        cx = layers.Concatenate()([c4, c5])

        x = self._feature_fusion_module(sx, cx, num_classes)

        x = layers.UpSampling2D(size=(8, 8), interpolation='bilinear')(x)
        x = layers.Conv2D(num_classes, 1, 1, kernel_initializer='he_normal')(x)

        outputs = x

        return models.Model(inputs, outputs, name=self.version)
Example 7
    def _pspnet(self, inputs):
        num_classes = self.num_classes
        _, inputs_h, inputs_w, _ = backend.int_shape(inputs)

        h, w = inputs_h // 8, inputs_w // 8
        x = self.encoder(inputs)

        if not (h % 6 == 0 and w % 6 == 0):
            raise ValueError(
                '\'pyramid pooling\' size must be divisible by 6, but received {size}'
                .format(size=(h, w)))
        pool_size = [(h, w), (h // 2, w // 2), (h // 3, w // 3),
                     (h // 6, w // 6)]

        # pyramid pooling, branch 1: global 1x1 bin
        x1 = custom_layers.GlobalAveragePooling2D(keep_dims=True)(x)
        x1 = layers.Conv2D(512, 1, strides=1,
                           kernel_initializer='he_normal')(x1)
        x1 = layers.BatchNormalization()(x1)
        x1 = layers.ReLU()(x1)
        x1 = layers.UpSampling2D(size=pool_size[0])(x1)

        # branch 2: 2x2 bins
        x2 = layers.AveragePooling2D(pool_size=pool_size[1])(x)
        x2 = layers.Conv2D(512, 1, strides=1,
                           kernel_initializer='he_normal')(x2)
        x2 = layers.BatchNormalization()(x2)
        x2 = layers.ReLU()(x2)
        x2 = layers.UpSampling2D(size=pool_size[1])(x2)

        # branch 3: 3x3 bins
        x3 = layers.AveragePooling2D(pool_size=pool_size[2])(x)
        x3 = layers.Conv2D(512, 1, strides=1,
                           kernel_initializer='he_normal')(x3)
        x3 = layers.BatchNormalization()(x3)
        x3 = layers.ReLU()(x3)
        x3 = layers.UpSampling2D(size=pool_size[2])(x3)

        # branch 4: 6x6 bins
        x6 = layers.AveragePooling2D(pool_size=pool_size[3])(x)
        x6 = layers.Conv2D(512, 1, strides=1,
                           kernel_initializer='he_normal')(x6)
        x6 = layers.BatchNormalization()(x6)
        x6 = layers.ReLU()(x6)
        x6 = layers.UpSampling2D(size=pool_size[3])(x6)

        # fuse the original features with the four pooled branches
        x = layers.Concatenate()([x, x1, x2, x3, x6])

        x = layers.Conv2D(512,
                          3,
                          strides=1,
                          padding='same',
                          kernel_initializer='he_normal')(x)
        x = layers.BatchNormalization()(x)
        x = layers.ReLU()(x)

        x = layers.Conv2D(num_classes,
                          1,
                          strides=1,
                          kernel_initializer='he_normal')(x)
        x = layers.BatchNormalization()(x)

        x = layers.UpSampling2D(size=(8, 8), interpolation='bilinear')(x)

        outputs = x

        return models.Model(inputs, outputs, name=self.version)
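
As a quick sanity check of the pooling arithmetic above, assume a hypothetical 384x384 input: the stride-8 encoder output is then 48x48, which passes the divisibility check, and the four pooling windows produce 1x1, 2x2, 3x3 and 6x6 grids of bins:

h, w = 384 // 8, 384 // 8          # encoder output resolution for a 384x384 input
pool_size = [(h, w), (h // 2, w // 2), (h // 3, w // 3), (h // 6, w // 6)]
for ph, pw in pool_size:
    print((ph, pw), '->', (h // ph, w // pw), 'bins')
# (48, 48) -> (1, 1) bins
# (24, 24) -> (2, 2) bins
# (16, 16) -> (3, 3) bins
# (8, 8) -> (6, 6) bins
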