Example #1
    def __init__(self, num_features=256,
                 atrous_rate=(6, 12, 18),
                 groups=16, **kwargs):
        self.num_features = num_features
        self.atrous_rate = atrous_rate
        self.groups = groups
        super().__init__(**kwargs)
        # ASPP 1x1 Branch
        self.aspp_1x1_branch = []
        self.aspp_1x1_branch.append(Conv2D(num_features, (1, 1),
                                           use_bias=False, name='aspp_1x1'))
        self.aspp_1x1_branch.append(GroupNormalization(groups=groups,
                                                       name='aspp_1x1_GN'))
        self.aspp_1x1_branch.append(ReLU(name='aspp_1x1_relu'))

        # ASPP Middle Branch
        self.aspp_branches = []
        for rate in atrous_rate:
            self.aspp_branches.append(
                AtrousSeparableConv2D(num_features, dilation_rate=rate,
                                      groups=groups, name=f'aspp_{rate}'))

        # ASPP Pooling Branch
        self.aspp_pool_conv = Conv2D(num_features, (1, 1),
                                     activation='relu',
                                     use_bias=False,
                                     name='aspp_pool')

        # ASPP Concat Layer
        self.concat_branch = []
        self.concat_branch.append(Conv2D(
            num_features, (1, 1), use_bias=False, name='concat_projection'))
        self.concat_branch.append(GroupNormalization(
            groups=groups, name='concat_projection_GN'))
        self.concat_branch.append(ReLU(name='concat_projection_relu'))
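Only the constructor is shown above, so how the branches are combined stays implicit. Below is a minimal call sketch for this ASPP head, assuming an image-pooling branch that is globally average-pooled, passed through aspp_pool_conv and bilinearly resized back to the feature-map size before all branches are concatenated (the pooling/resize details are assumptions, not part of the original snippet; tf is tensorflow):

    def call(self, inputs, training=None):
        # 1x1 branch
        x1 = inputs
        for layer in self.aspp_1x1_branch:
            x1 = layer(x1)
        # atrous separable branches, one per dilation rate
        branches = [branch(inputs) for branch in self.aspp_branches]
        # image-pooling branch: global average pool, 1x1 conv, resize back
        pool = tf.reduce_mean(inputs, axis=[1, 2], keepdims=True)
        pool = self.aspp_pool_conv(pool)
        pool = tf.image.resize(pool, tf.shape(inputs)[1:3])
        # concatenate every branch and project back to num_features channels
        x = tf.concat([x1] + branches + [pool], axis=-1)
        for layer in self.concat_branch:
            x = layer(x)
        return x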
Example #2
    def __init__(self,
                 num_blocks,
                 num_classes,
                 num_depth=4,
                 num_features=256,
                 use_separable_conv=False,
                 expand_ratio=4.,
                 use_squeeze_excite=False,
                 squeeze_ratio=16.,
                 groups=16,
                 **kwargs):
        self.num_blocks = num_blocks
        self.num_classes = num_classes
        self.num_depth = num_depth
        self.num_features = num_features
        self.use_separable_conv = use_separable_conv
        self.expand_ratio = expand_ratio
        self.use_squeeze_excite = use_squeeze_excite
        self.squeeze_ratio = squeeze_ratio
        self.groups = groups
        super().__init__(**kwargs)
        # each block stacks num_depth conv (+ optional squeeze-excite) + GN layers,
        # followed by a 2x transposed-conv upsample and a per-class sigmoid head
        self.blocks = []
        for idx in range(self.num_blocks):
            block = []
            for i in range(self.num_depth):
                if self.use_squeeze_excite:
                    layer = SqueezeExcite(self.squeeze_ratio)
                    block.append(layer)

                if self.use_separable_conv:
                    layer = MobileSeparableConv2D(num_features, (3, 3),
                                                  expand_ratio=expand_ratio)
                else:
                    layer = Conv2D(
                        num_features, (3, 3),
                        activation='relu',
                        padding='same',
                        kernel_initializer=RandomNormal(stddev=0.01))
                block.append(layer)

                layer = GroupNormalization(self.groups)
                block.append(layer)

            layer = Conv2DTranspose(
                num_features, (2, 2), (2, 2),
                padding='same',
                activation='relu',
                kernel_initializer=RandomNormal(stddev=0.01))
            block.append(layer)
            layer = Conv2D(num_classes, (1, 1),
                           padding='same',
                           activation='sigmoid',
                           kernel_initializer=RandomNormal(stddev=0.01))
            block.append(layer)
            self.blocks.append(block)
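Again, only the constructor appears in the example. One plausible way to run these blocks, assuming each block refines and 2x-upsamples a shared feature map and emits its sigmoid classification map as a side output (this cascading scheme is an assumption, not taken from the original source):

    def call(self, inputs, training=None):
        outputs = []
        x = inputs
        for block in self.blocks:
            # every layer except the final sigmoid head refines and upsamples x
            for layer in block[:-1]:
                x = layer(x)
            # the last layer of each block is the per-class sigmoid output
            outputs.append(block[-1](x))
        return outputs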
Example #3
    def __init__(self, filters, dilation_rate=3,
                 groups=16, **kwargs):
        prefix = kwargs.get('name', 'AtrousSeparableConv2d')
        super().__init__(**kwargs)

        self.groups = groups
        self.filters = filters
        self.dilation_rate = dilation_rate

        self.depth_conv2d = DepthwiseConv2D((3, 3), dilation_rate=dilation_rate,
                                            padding='same', use_bias=False,
                                            name=prefix + '_depthwise')
        self.point_conv2d = Conv2D(self.filters, (1, 1), use_bias=False,
                                   name=prefix + '_pointwise')

        self.depth_norm = GroupNormalization(groups=self.groups, name=prefix + '_depthwise_GN')
        self.point_norm = GroupNormalization(groups=self.groups, name=prefix + '_pointwise_GN')

        self.depth_relu = ReLU(name=prefix+'_depthwise_relu')
        self.point_relu = ReLU(name=prefix+'_pointwise_relu')
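The constructor above only builds the layers; a straightforward call for this atrous separable convolution (a sketch, not taken from the original source) simply chains the depthwise and pointwise stages, each followed by GroupNormalization and ReLU:

    def call(self, inputs, training=None):
        # depthwise atrous convolution -> GN -> ReLU
        x = self.depth_conv2d(inputs)
        x = self.depth_norm(x)
        x = self.depth_relu(x)
        # pointwise 1x1 convolution -> GN -> ReLU
        x = self.point_conv2d(x)
        x = self.point_norm(x)
        x = self.point_relu(x)
        return x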
Example #4
    def __init__(self,
                 filters,
                 kernel_size=(3, 3),
                 expand_ratio=4.,
                 stride=1,
                 groups=16,
                 **kwargs):
        prefix = kwargs.get('name', 'SeparableConv2d')
        super().__init__(**kwargs)

        self.filters = filters
        if isinstance(kernel_size, int):
            self.kernel_size = (kernel_size, kernel_size)
        else:
            self.kernel_size = kernel_size
        self.expand_ratio = expand_ratio
        self.stride = stride
        self.groups = groups

        self.expand_conv2d = Conv2D(int(self.expand_ratio * filters), (1, 1),
                                    use_bias=False,
                                    name=prefix + '_expand_conv')
        self.expand_norm = GroupNormalization(groups=self.groups,
                                              name=prefix + '_expand_GN')
        self.expand_relu = ReLU(name=prefix + '_expand_relu')

        self.depth_conv2d = DepthwiseConv2D(self.kernel_size, (stride, stride),
                                            padding='same',
                                            use_bias=False,
                                            name=prefix + '_depthwise')
        self.depth_norm = GroupNormalization(groups=self.groups,
                                             name=prefix + '_depthwise_GN')
        self.depth_relu = ReLU(name=prefix + '_depthwise_relu')

        self.squeeze_conv2d = Conv2D(filters, (1, 1),
                                     use_bias=False,
                                     name=prefix + '_squeeze_conv')
        self.squeeze_norm = GroupNormalization(groups=self.groups,
                                               name=prefix + '_squeeze_GN')
        self.skip_connection = Add(name=prefix + '_skip_connection')
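This layer follows the MobileNetV2 inverted-residual pattern: expand with a 1x1 convolution, run a depthwise convolution, then project ("squeeze") back to filters channels, with a residual Add. A call sketch, assuming the residual is only applied when the stride is 1 and the input already has filters channels:

    def call(self, inputs, training=None):
        # 1x1 expansion -> GN -> ReLU
        x = self.expand_conv2d(inputs)
        x = self.expand_norm(x)
        x = self.expand_relu(x)
        # depthwise convolution -> GN -> ReLU
        x = self.depth_conv2d(x)
        x = self.depth_norm(x)
        x = self.depth_relu(x)
        # 1x1 projection back to `filters` channels -> GN
        x = self.squeeze_conv2d(x)
        x = self.squeeze_norm(x)
        # residual connection when spatial size and channel count are unchanged
        if self.stride == 1 and inputs.shape[-1] == self.filters:
            x = self.skip_connection([inputs, x])
        return x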
Example #5
    def __init__(self, num_depth=2, num_features=256,
                 num_skip_features=48, num_classes=3,
                 use_separable_conv=False, expand_ratio=4.,
                 use_squeeze_excite=False, squeeze_ratio=16.,
                 groups=16, **kwargs):
        self.num_depth = num_depth
        self.num_features = num_features
        self.num_skip_features = num_skip_features
        self.num_classes = num_classes
        self.use_separable_conv = use_separable_conv
        self.expand_ratio = expand_ratio
        self.use_squeeze_excite = use_squeeze_excite
        self.squeeze_ratio = squeeze_ratio
        self.groups = groups
        super().__init__(**kwargs)
        self.skip_layers = []
        self.skip_layers.append(Conv2D(self.num_skip_features, (1, 1), use_bias=False,
                                       name='skip_projection'))
        self.skip_layers.append(GroupNormalization(groups=groups, name='skip_projection_GN'))
        self.skip_layers.append(ReLU(name='skip_projection_relu'))

        self.block = []
        for i in range(self.num_depth):
            if self.use_squeeze_excite:
                layer = SqueezeExcite(self.squeeze_ratio)
                self.block.append(layer)
            if self.use_separable_conv:
                layer = MobileSeparableConv2D(self.num_features, (3, 3),
                                              expand_ratio=self.expand_ratio)
            else:
                layer = Conv2D(num_features, (3, 3), activation='relu', padding='same',
                               kernel_initializer=RandomNormal(stddev=0.01))
            self.block.append(layer)
            layer = GroupNormalization(self.groups)
            self.block.append(layer)

        self.output_layer = Conv2D(num_classes, (1, 1), activation='sigmoid')
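Only the constructor is shown; a plausible call for this decoder, assuming it receives the encoder output together with a low-level skip feature map, bilinearly upsamples the former to the skip resolution, concatenates, refines with self.block, and applies the sigmoid classifier (the two-tensor input and the resize are assumptions; tf is tensorflow):

    def call(self, inputs, training=None):
        features, skip = inputs
        # project the low-level feature map to num_skip_features channels
        for layer in self.skip_layers:
            skip = layer(skip)
        # upsample the encoder output to the skip resolution and concatenate
        features = tf.image.resize(features, tf.shape(skip)[1:3])
        x = tf.concat([features, skip], axis=-1)
        # refinement stack built in __init__
        for layer in self.block:
            x = layer(x)
        return self.output_layer(x)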
Example #6
def load_backbone(backbone_type="resnet50",
                  backbone_outputs=('C3', 'C4', 'C5', 'P6', 'P7'),
                  num_features=256):
    global BACKBONE_LAYERS
    inputs = Input((None, None, 3), name='images')
    if backbone_type.lower() == 'resnet50':
        preprocess = BackBonePreProcess(rgb=False,
                                        mean_shift=True,
                                        normalize=0)(inputs)
        model = ResNet50(input_tensor=preprocess, include_top=False)
    elif backbone_type.lower() == 'resnet50v2':
        preprocess = BackBonePreProcess(rgb=True, mean_shift=True,
                                        normalize=2)(inputs)
        resnet50v2, _ = Classifiers.get('resnet50v2')
        model = resnet50v2(input_tensor=preprocess,
                           include_top=False,
                           weights='imagenet')
    elif backbone_type.lower() == "resnet101v2":
        preprocess = BackBonePreProcess(rgb=True,
                                        mean_shift=False,
                                        normalize=2)(inputs)
        model = ResNet101V2(input_tensor=preprocess,
                            include_top=False,
                            backend=tf.keras.backend,
                            layers=tf.keras.layers,
                            models=tf.keras.models,
                            utils=tf.keras.utils)
    elif backbone_type.lower() == 'resnext50':
        preprocess = BackBonePreProcess(rgb=True, mean_shift=True,
                                        normalize=2)(inputs)
        model = ResNeXt50(input_tensor=preprocess, include_top=False)
    elif backbone_type.lower() == "seresnet50":
        preprocess = BackBonePreProcess(rgb=True, mean_shift=True,
                                        normalize=3)(inputs)
        seresnet50, _ = Classifiers.get('seresnet50')
        model = seresnet50(input_tensor=preprocess,
                           original_input=inputs,
                           include_top=False,
                           weights='imagenet')
    elif backbone_type.lower() == "seresnet34":
        preprocess = BackBonePreProcess(rgb=True,
                                        mean_shift=False,
                                        normalize=0)(inputs)
        seresnet34, _ = Classifiers.get('seresnet34')
        model = seresnet34(input_tensor=preprocess,
                           original_input=inputs,
                           include_top=False,
                           weights='imagenet')
    elif backbone_type.lower() == "seresnext50":
        preprocess = BackBonePreProcess(rgb=True, mean_shift=True,
                                        normalize=3)(inputs)
        seresnext50, _ = Classifiers.get('seresnext50')
        model = seresnext50(input_tensor=preprocess,
                            original_input=inputs,
                            include_top=False,
                            weights='imagenet')
    elif backbone_type.lower() == "vgg16":
        preprocess = BackBonePreProcess(rgb=False,
                                        mean_shift=True,
                                        normalize=0)(inputs)
        model = VGG16(input_tensor=preprocess, include_top=False)
    elif backbone_type.lower() == "mobilenet":
        preprocess = BackBonePreProcess(rgb=False,
                                        mean_shift=False,
                                        normalize=2)(inputs)
        model = MobileNet(input_tensor=preprocess,
                          include_top=False,
                          alpha=1.0)
    elif backbone_type.lower() == 'efficientnetb2':
        preprocess = BackBonePreProcess(rgb=True, mean_shift=True,
                                        normalize=3)(inputs)
        model = efn.EfficientNetB2(input_tensor=preprocess,
                                   include_top=False,
                                   weights='imagenet')
    elif backbone_type.lower() == 'efficientnetb3':
        preprocess = BackBonePreProcess(rgb=True, mean_shift=True,
                                        normalize=3)(inputs)
        model = efn.EfficientNetB3(input_tensor=preprocess,
                                   include_top=False,
                                   weights='imagenet')
    elif backbone_type.lower() == 'efficientnetb4':
        preprocess = BackBonePreProcess(rgb=True, mean_shift=True,
                                        normalize=3)(inputs)
        model = efn.EfficientNetB4(input_tensor=preprocess,
                                   include_top=False,
                                   weights='imagenet')
    else:
        raise NotImplementedError(
            f"backbone_type은 {BACKBONE_LAYERS.keys()} 중에서 하나가 되어야 합니다.")
    model.trainable = False

    # Fetch the requested block layers from the backbone
    features = []
    for key, layer_name in BACKBONE_LAYERS[backbone_type.lower()].items():
        if key in backbone_outputs:
            layer_tensor = model.get_layer(layer_name).output
            features.append(Identity(name=key)(layer_tensor))

    if backbone_type.lower() == "mobilenet":
        # Extra Layer for Feature Extracting
        Z6 = ZeroPadding2D(((0, 1), (0, 1)),
                           name='P6_zeropadding')(features[-1])
        P6 = Conv2D(num_features, (3, 3),
                    strides=(2, 2),
                    padding='valid',
                    activation='relu',
                    name='P6_conv')(Z6)
        if 'P6' in backbone_outputs:
            features.append(Identity(name='P6')(P6))
        G6 = GroupNormalization(name='P6_norm')(P6)
        Z7 = ZeroPadding2D(((0, 1), (0, 1)), name='P7_zeropadding')(G6)
        P7 = Conv2D(num_features, (3, 3),
                    strides=(2, 2),
                    padding='valid',
                    activation='relu',
                    name='P7_conv')(Z7)
        if 'P7' in backbone_outputs:
            features.append(Identity(name='P7')(P7))
    else:
        P6 = Conv2D(num_features, (3, 3),
                    strides=(2, 2),
                    padding='same',
                    activation='relu',
                    name='P6_conv')(features[-1])
        if 'P6' in backbone_outputs:
            features.append(Identity(name='P6')(P6))
        G6 = GroupNormalization(name='P6_norm')(P6)
        P7 = Conv2D(num_features, (3, 3),
                    strides=(2, 2),
                    padding='same',
                    activation='relu',
                    name='P7_conv')(G6)
        if 'P7' in backbone_outputs:
            features.append(Identity(name='P7')(P7))

    return Model(inputs, features, name=backbone_type)
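A minimal usage sketch, assuming the module-level BACKBONE_LAYERS dict maps each backbone name to the layer names of its C3/C4/C5 outputs and that the corresponding backbone packages (classification_models, efficientnet, etc.) are installed:

backbone = load_backbone(backbone_type='resnet50',
                         backbone_outputs=('C3', 'C4', 'C5', 'P6', 'P7'),
                         num_features=256)
backbone.summary()

# The returned frozen Model maps an image batch to the requested feature maps,
# e.g. C3 at 1/8, C4 at 1/16, C5 at 1/32 of the input resolution, plus the
# extra P6/P7 levels built above.
images = tf.zeros((1, 512, 512, 3))
c3, c4, c5, p6, p7 = backbone(images)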