Example #1
    def __init__(self,
                 backbond,
                 input_shape=(224, 224, 3),
                 atrous_rates=(6, 12, 18, 24),
                 num_filters=256,
                 classes=20):
        super(_DeeplabV3_plus, self).__init__()
        moduals = list(backbond.children())
        low_level_idx = -1
        high_level_idx = -1
        for i in range(len(moduals)):
            if low_level_idx < 0 and moduals[i].output_shape[
                    -2] == backbond.input_shape[-2] // 8:
                low_level_idx = i

            if high_level_idx < 0 and moduals[i].output_shape[
                    -2] == backbond.input_shape[-2] // 32:
                high_level_idx = i
                break
        self.num_filters = num_filters
        self.classes = classes
        self.atrous_rates = atrous_rates
        self.backbond1 = Sequential(backbond[:low_level_idx])
        self.backbond2 = Sequential(backbond[low_level_idx:high_level_idx])
        self.aspp = ASPP(atrous_rates=self.atrous_rates,
                         num_filters=self.num_filters)
        self.low_level_conv = Conv2d_Block(
            (1, 1),
            num_filters=int(48 * self.num_filters / 256),
            strides=1,
            use_bias=False,
            activation='leaky_relu',
            normalization='batch')
        self.decoder = Sequential(
            DepthwiseConv2d_Block((3, 3),
                                  depth_multiplier=0.5,
                                  strides=1,
                                  use_bias=False,
                                  activation='leaky_relu',
                                  normalization='batch',
                                  dropout_rate=0.5),
            DepthwiseConv2d_Block((3, 3),
                                  depth_multiplier=1,
                                  strides=1,
                                  use_bias=False,
                                  activation='leaky_relu',
                                  normalization='batch',
                                  dropout_rate=0.1),
            Conv2d((1, 1),
                   num_filters=self.classes,
                   strides=1,
                   use_bias=False,
                   activation='sigmoid'),
        )
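# Hedged sketch (not part of the original excerpt): the code above only shows
# the constructor. The function below illustrates how a DeepLabV3+ forward
# pass typically wires these modules together; `channel_concat` is a
# hypothetical helper standing in for whatever channel-concatenation op the
# backend provides, and the 4x upsampling factor follows from stride 32 -> 8.
def deeplabv3_plus_forward(model, x, channel_concat):
    low_level = model.backbond1(x)            # stride-8 low-level features
    high_level = model.backbond2(low_level)   # stride-32 high-level features
    aspp_out = model.aspp(high_level)
    aspp_out = Upsampling2d(scale_factor=4, mode='bilinear',
                            align_corners=False)(aspp_out)
    low_level = model.low_level_conv(low_level)
    fused = channel_concat([aspp_out, low_level])  # hypothetical concat helper
    return model.decoder(fused)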
Example #2
def DenseLayer(growth_rate, name=''):
    """
    The basic normalization, convolution, and activation combination used for a dense connection.

    Args:
        growth_rate (int): The growth rate regulates how much new information each layer contributes to the global state.
        name (str): Name of this dense layer.

    Returns:
        An instance of a dense layer.

    """
    items = OrderedDict()
    items['norm'] = BatchNorm2d()
    items['relu'] = Relu()
    items['conv1'] = Conv2d_Block((1, 1),
                                  num_filters=4 * growth_rate,
                                  strides=1,
                                  activation='relu',
                                  auto_pad=True,
                                  padding_mode='zero',
                                  use_bias=False,
                                  normalization='batch')
    items['conv2'] = Conv2d((3, 3),
                            num_filters=growth_rate,
                            strides=1,
                            auto_pad=True,
                            padding_mode='zero',
                            use_bias=False)
    return Sequential(items)
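# Hedged usage sketch (not from the original source): each DenseLayer emits
# `growth_rate` feature maps, and a dense block concatenates every layer's
# output with its input, so channels grow by growth_rate per layer. The block
# below only illustrates that idea with layers already used in these examples;
# it is not the library's own DenseBlock implementation.
def SimpleDenseBlock(num_layers, growth_rate=32):
    block = Sequential()
    for i in range(num_layers):
        # ShortCut2d with mode='concate' concatenates the branch outputs,
        # here the identity (incoming features) and the new dense layer.
        block.add_module(
            'denselayer{0}'.format(i + 1),
            ShortCut2d(Identity(),
                       DenseLayer(growth_rate,
                                  name='denselayer{0}'.format(i + 1)),
                       mode='concate'))
    return block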
Example #3
def ASPP(atrous_rates=(6, 12, 18), num_filters=256):
    layers = OrderedDict()
    layers['conv1'] = Conv2d_Block((1, 1),
                                   num_filters=num_filters,
                                   strides=1,
                                   use_bias=False,
                                   activation=None,
                                   normalization='batch')
    for i in range(len(atrous_rates)):
        layers['aspp_dilation{0}'.format(i)] = Conv2d_Block(
            (3, 3),
            num_filters=num_filters,
            strides=1,
            use_bias=False,
            activation=None,
            normalization='batch',
            dilation=atrous_rates[i])
    layers['aspp_pooling'] = ASPPPooling(num_filters)
    return Sequential(
        ShortCut2d(layers, mode='concate'),
        Conv2d_Block((1, 1),
                     num_filters,
                     strides=1,
                     use_bias=False,
                     activation='relu',
                     normalization='batch',
                     dilation=1,
                     dropout_rate=0.5,
                     name='project'))
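# Hedged usage note (not from the original source): regardless of how many
# dilation rates are configured, the concatenated branches are projected back
# to `num_filters` channels by the final 1x1 'project' block, so the ASPP
# output width stays fixed.
aspp_head = ASPP(atrous_rates=(6, 12, 18), num_filters=256)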
Example #4
def ASPPPooling(num_filters, size):
    return Sequential(
        GlobalAvgPool2d(),
        Conv2d((1, 1), num_filters, strides=1, use_bias=False,
               activation=None),
        Upsampling2d(size=(size[-3], size[-2]),
                     mode='bilinear',
                     align_corners=False))
Example #5
def DeeplabV3(backbond, input_shape=(224, 224, 3), classes=20, **kwargs):
    input_shape = tuple(input_shape)
    deeplab = Sequential(name='deeplabv3')

    deeplab.add_module('backbond', backbond)
    deeplab.add_module('classifier',
                       DeepLabHead(classes=classes, num_filters=128))
    deeplab.add_module(
        'upsample',
        Upsampling2d(scale_factor=16, mode='bilinear', align_corners=False))
    model = ImageSegmentationModel(input_shape=input_shape, output=deeplab)
    return model
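# Hypothetical usage sketch (not from the original source): any feature
# extractor Sequential can serve as `backbond`; the MobileNet feature stack
# used here is only a placeholder assumption.
backbone = MobileNet(include_top=False).model.features
seg_model = DeeplabV3(backbond=backbone, input_shape=(224, 224, 3), classes=21)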
Example #6
def inverted_residual(in_filters,
                      num_filters=64,
                      strides=1,
                      expansion=4,
                      name=''):
    mid_filters = int(round(in_filters * expansion))
    layers = []
    if expansion != 1:
        layers.append(
            Conv2d_Block((1, 1),
                         num_filters=mid_filters,
                         strides=1,
                         auto_pad=True,
                         padding_mode='zero',
                         normalization='batch',
                         activation='relu6',
                         name=name + '_{0}_conv'.format(len(layers))))

    layers.append(
        DepthwiseConv2d_Block((3, 3),
                              depth_multiplier=1,
                              strides=strides,
                              auto_pad=True,
                              padding_mode='zero',
                              normalization='batch',
                              activation='relu6',
                              name=name + '_{0}_conv'.format(len(layers))))
    layers.append(
        Conv2d_Block((1, 1),
                     num_filters=num_filters,
                     strides=1,
                     auto_pad=False,
                     padding_mode='zero',
                     normalization='batch',
                     activation=None,
                     name=name + '_{0}_conv'.format(len(layers))))
    if strides == 1 and in_filters == num_filters:
        return ShortCut2d(Sequential(*layers), Identity(), activation=None)
    else:
        return Sequential(*layers)
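# Hedged usage sketch (not from the original source): the final check decides
# the return type, so only blocks that keep both the spatial size and the
# channel count get the identity shortcut.
res_block = inverted_residual(32, num_filters=32, strides=1, expansion=6)    # ShortCut2d (residual)
plain_block = inverted_residual(32, num_filters=64, strides=2, expansion=6)  # plain Sequential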
Example #7
def o_net():
    return Sequential(Conv2d((3, 3),
                             32,
                             strides=1,
                             auto_pad=False,
                             use_bias=True,
                             name='conv1'),
                      PRelu(num_parameters=1),
                      MaxPool2d((3, 3), strides=2, auto_pad=False),
                      Conv2d((3, 3),
                             64,
                             strides=1,
                             auto_pad=False,
                             use_bias=True,
                             name='conv2'),
                      PRelu(num_parameters=1),
                      MaxPool2d((3, 3), strides=2, auto_pad=False),
                      Conv2d((3, 3),
                             64,
                             strides=1,
                             auto_pad=False,
                             use_bias=True,
                             name='conv3'),
                      PRelu(num_parameters=1),
                      MaxPool2d((2, 2), strides=2, auto_pad=False),
                      Conv2d((2, 2),
                             128,
                             strides=1,
                             auto_pad=False,
                             use_bias=True,
                             name='conv4'),
                      PRelu(num_parameters=1),
                      Flatten(),
                      Dense(256, activation=None, use_bias=True, name='conv5'),
                      PRelu(num_parameters=1),
                      Combine(
                          Dense(1,
                                activation='sigmoid',
                                use_bias=True,
                                name='conv6_1'),
                          Dense(4,
                                activation=None,
                                use_bias=True,
                                name='conv6_2'),
                          Dense(10,
                                activation=None,
                                use_bias=True,
                                name='conv6_3')),
                      name='onet')
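# Hedged note (not from the original source): this layout matches the MTCNN
# O-Net, whose Combine head emits a face-probability score (1 unit, sigmoid),
# a bounding-box regression vector (4 units) and facial-landmark offsets
# (10 units). The conventional O-Net input is a 48x48 crop; that size is
# stated here as background, not something this code enforces.
onet = o_net()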
Example #8
def DeepLabHead(classes=20, atrous_rates=(6, 12, 18, 24), num_filters=256):
    return Sequential(
        ASPP(atrous_rates, num_filters=num_filters),
        Conv2d_Block((3, 3),
                     num_filters,
                     auto_pad=True,
                     use_bias=False,
                     activation='relu',
                     normalization='batch'),
        Conv2d((1, 1),
               num_filters=classes,
               strides=1,
               auto_pad=True,
               activation='sigmoid',
               name='classifier'))
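# Hedged sketch (not from the original source) mirroring Example #5:
# backbone features -> DeepLabHead -> 16x bilinear upsampling. The `backbone`
# argument is a placeholder assumption for any stride-16 feature extractor.
def build_deeplab_net(backbone, classes=20):
    return Sequential(
        backbone,
        DeepLabHead(classes=classes, atrous_rates=(6, 12, 18, 24), num_filters=256),
        Upsampling2d(scale_factor=16, mode='bilinear', align_corners=False),
        name='deeplabv3')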
Example #9
def p_net():
    return Sequential(Conv2d((3, 3),
                             10,
                             strides=1,
                             auto_pad=False,
                             use_bias=True,
                             name='conv1'),
                      PRelu(num_parameters=1),
                      MaxPool2d((2, 2), strides=2, auto_pad=False),
                      Conv2d((3, 3),
                             16,
                             strides=1,
                             auto_pad=False,
                             use_bias=True,
                             name='conv2'),
                      PRelu(num_parameters=1),
                      Conv2d((3, 3),
                             32,
                             strides=1,
                             auto_pad=False,
                             use_bias=True,
                             name='conv3'),
                      PRelu(num_parameters=1),
                      Combine(
                          Conv2d((1, 1),
                                 1,
                                 strides=1,
                                 auto_pad=False,
                                 use_bias=True,
                                 activation='sigmoid',
                                 name='conv4_1'),
                          Conv2d((1, 1),
                                 4,
                                 strides=1,
                                 auto_pad=False,
                                 use_bias=True,
                                 name='conv4_2'),
                          Conv2d((1, 1),
                                 10,
                                 strides=1,
                                 auto_pad=False,
                                 use_bias=True,
                                 name='conv4_3')),
                      name='pnet')
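# Hedged note (not from the original source): unlike o_net above, p_net has no
# Flatten/Dense stage, so it stays fully convolutional and can slide over an
# image pyramid of arbitrary size; its Combine head produces per-location
# score (1), box-regression (4) and landmark (10) maps. The conventional
# 12x12 P-Net receptive field is stated as background, not enforced here.
pnet = p_net()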
Example #10
def Transition(reduction, name=''):
    """
     The block for transition-down, down-sampling by average pooling
    Args:
        reduction (float): The depth_multiplier to transition-down the dense features
        name (str): Name of the transition-down process

    Returns:
        An instrance of transition-down .


    """
    items = OrderedDict()
    items['norm'] = BatchNorm2d()
    items['relu'] = Relu()
    items['conv1'] = Conv2d((1, 1),
                            num_filters=None,
                            depth_multiplier=reduction,
                            strides=1,
                            auto_pad=True,
                            padding_mode='zero',
                            use_bias=False)
    items['pool'] = AvgPool2d(2, 2, auto_pad=True)
    return Sequential(items, name=name)
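# Hedged usage sketch (not from the original source): with num_filters=None
# and depth_multiplier=reduction, the 1x1 convolution keeps a `reduction`
# fraction of the incoming channels before the 2x average pooling, as used
# between dense blocks in Example #13.
down1 = Transition(0.5, name='transitiondown1')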
Example #11
def MobileNet(input_shape=(224, 224, 3),
              classes=1000,
              use_bias=False,
              width_mult=1.0,
              round_nearest=8,
              include_top=True,
              model_name='',
              **kwargs):
    input_filters = 32
    last_filters = 1280
    mobilenet = Sequential(name='mobilenet')
    inverted_residual_setting = [
        # t, c, n, s
        [1, 16, 1, 1],
        [6, 24, 2, 2],
        [6, 32, 3, 2],
        [6, 64, 4, 2],
        [6, 96, 3, 1],
        [6, 160, 3, 2],
        [6, 320, 1, 1],
    ]
    input_filters = _make_divisible(input_filters * width_mult, round_nearest)
    last_filters = _make_divisible(last_filters * max(1.0, width_mult),
                                   round_nearest)
    features = []
    features.append(
        Conv2d_Block((3, 3),
                     num_filters=input_filters,
                     strides=2,
                     auto_pad=True,
                     padding_mode='zero',
                     normalization='batch',
                     activation='relu6',
                     name='first_layer'))
    for t, c, n, s in inverted_residual_setting:
        output_filters = _make_divisible(c * width_mult, round_nearest)
        for i in range(n):
            strides = s if i == 0 else 1
            features.append(
                inverted_residual(input_filters,
                                  num_filters=output_filters,
                                  strides=strides,
                                  expansion=t,
                                  name='irb_{0}'.format(i)))
            input_filters = output_filters
    features.append(
        Conv2d_Block((1, 1),
                     last_filters,
                     auto_pad=True,
                     padding_mode='zero',
                     normalization='batch',
                     activation='relu6',
                     name='last_layer'))
    mobilenet.add_module('features', Sequential(*features, name='features'))
    mobilenet.add_module('gap', GlobalAvgPool2d())
    if include_top:
        mobilenet.add_module('drop', Dropout(0.2))
        mobilenet.add_module('fc', Dense(classes, activation=None))
        mobilenet.add_module('softmax', SoftMax(name='softmax'))
    model = ImageClassificationModel(input_shape=input_shape, output=mobilenet)
    model.signature = get_signature(model.model.forward)
    with open(os.path.join(os.path.dirname(os.path.abspath(__file__)),
                           'imagenet_labels1.txt'),
              'r',
              encoding='utf-8-sig') as f:
        labels = [l.rstrip() for l in f]
        model.class_names = labels
    model.preprocess_flow = [
        resize((224, 224), keep_aspect=True),
        normalize(127.5, 127.5)
    ]
    # model.summary()
    return model
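# Hedged usage sketch (not from the original source): width_mult scales every
# channel count (rounded to a multiple of round_nearest), so smaller
# multipliers shrink the network while keeping the same topology. Weights are
# randomly initialized; this builder does not load pretrained parameters.
mobilenet_full = MobileNet(input_shape=(224, 224, 3), classes=1000, width_mult=1.0)
mobilenet_half = MobileNet(input_shape=(224, 224, 3), classes=1000, width_mult=0.5)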
Example #12
def make_vgg_layers(cfg,
                    num_classes=1000,
                    input_shape=(224, 224, 3),
                    include_top=True):
    layers = []
    in_channels = 3
    block = 1
    conv = 1
    vgg = Sequential()
    for v in cfg:
        if v == 'M':
            vgg.add_module(
                'block{0}_pool'.format(block),
                MaxPool2d(kernel_size=2,
                          strides=2,
                          name='block{0}_pool'.format(block)))
            block += 1
            conv = 1
        else:
            vgg.add_module(
                'block{0}_conv{1}'.format(block, conv),
                Conv2d((3, 3),
                       v,
                       auto_pad=True,
                       activation=None,
                       use_bias=True,
                       name='block{0}_conv{1}'.format(block, conv)))

            vgg.add_module('block{0}_relu{1}'.format(block, conv),
                           Relu(name='block{0}_relu{1}'.format(block, conv)))
            conv += 1
            in_channels = v
    if include_top:
        vgg.add_module('flattened', Flatten())
        vgg.add_module('fc1', Dense(4096, use_bias=True, activation='relu'))
        vgg.add_module('drop1', Dropout(0.5))
        vgg.add_module('fc2', Dense(4096, use_bias=True, activation='relu'))
        vgg.add_module('drop2', Dropout(0.5))
        vgg.add_module('fc3',
                       Dense(num_classes, use_bias=True, activation='softmax'))

    model = ImageClassificationModel(input_shape=input_shape, output=vgg)
    model.signature = get_signature(model.model.forward)
    with open(os.path.join(os.path.dirname(os.path.abspath(__file__)),
                           'imagenet_labels1.txt'),
              'r',
              encoding='utf-8-sig') as f:
        labels = [l.rstrip() for l in f]
        model.class_names = labels
    model.preprocess_flow = [
        Resize((input_shape[0], input_shape[1]), keep_aspect=True),
        to_bgr(),
        Normalize([103.939, 116.779, 123.68], [1, 1, 1])
    ]

    # model.summary()

    return model
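# Hedged usage sketch (not from the original source): integers in `cfg` are
# 3x3 convolution widths and 'M' inserts a 2x max-pool. The list below is the
# canonical VGG16 configuration, not something defined in this excerpt.
vgg16_cfg = [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M',
             512, 512, 512, 'M', 512, 512, 512, 'M']
vgg16 = make_vgg_layers(vgg16_cfg,
                        num_classes=1000,
                        input_shape=(224, 224, 3),
                        include_top=True)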
Example #13
def DenseNet(blocks,
             growth_rate=32,
             initial_filters=64,
             include_top=True,
             pretrained=True,
             input_shape=(224, 224, 3),
             num_classes=1000,
             name='',
             **kwargs):
    """'
    Instantiates the DenseNet architecture.
    Optionally loads weights pre-trained on ImageNet.

    Args
        blocks (tuple/ list of int ): numbers of building blocks for the dense layers.

        growth_rate (int):The growth rate regulates how much new information each layer contributes to the global state

        initial_filters (int): the channel of the first convolution layer

        pretrained (bool): If True, returns a model pre-trained on ImageNet.

        input_shape (tuple or list): the default input image size in CHW order (C, H, W)

        num_classes (int): number of classes

        name (string): anme of the model

    Returns
        A trident image classification model instance.

    """
    densenet = Sequential()
    densenet.add_module(
        'conv1/conv',
        Conv2d_Block((7, 7),
                     num_filters=initial_filters,
                     strides=2,
                     use_bias=False,
                     auto_pad=True,
                     padding_mode='zero',
                     activation='relu',
                     normalization='batch',
                     name='conv1/conv'))
    densenet.add_module(
        'maxpool',
        MaxPool2d((3, 3), strides=2, auto_pad=True, padding_mode='zero'))
    densenet.add_module('denseblock1',
                        DenseBlock(blocks[0], growth_rate=growth_rate))
    densenet.add_module('transitiondown1', Transition(0.5))
    densenet.add_module('denseblock2',
                        DenseBlock(blocks[1], growth_rate=growth_rate))
    densenet.add_module('transitiondown2', Transition(0.5))
    densenet.add_module('denseblock3',
                        DenseBlock(blocks[2], growth_rate=growth_rate))
    densenet.add_module('transitiondown3', Transition(0.5))
    densenet.add_module('denseblock4',
                        DenseBlock(blocks[3], growth_rate=growth_rate))
    densenet.add_module('classifier_norm', BatchNorm2d(name='classifier_norm'))
    densenet.add_module('classifier_relu', Relu(name='classifier_relu'))
    densenet.add_module('avg_pool', GlobalAvgPool2d(name='avg_pool'))
    if include_top:
        densenet.add_module(
            'classifier', Dense(num_classes,
                                activation=None,
                                name='classifier'))
        densenet.add_module('softmax', SoftMax(name='softmax'))
    densenet.name = name

    model = ImageClassificationModel(input_shape=input_shape, output=densenet)

    with open(os.path.join(os.path.dirname(os.path.abspath(__file__)),
                           'imagenet_labels1.txt'),
              'r',
              encoding='utf-8-sig') as f:
        labels = [l.rstrip() for l in f]
        model.class_names = labels
    model.preprocess_flow = [
        resize((input_shape[0], input_shape[1]), keep_aspect=True),
        normalize(0, 255),
        normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]
    # model.summary()
    return model
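# Hypothetical usage sketch (not from the original source): the canonical
# DenseNet-121 stacks (6, 12, 24, 16) dense layers per block with growth rate
# 32; those counts are the standard configuration, not values defined above.
densenet121 = DenseNet(blocks=(6, 12, 24, 16),
                       growth_rate=32,
                       initial_filters=64,
                       include_top=True,
                       pretrained=False,
                       input_shape=(224, 224, 3),
                       num_classes=1000,
                       name='densenet121')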
Example #14
def ASPPPooling(num_filters):
    return Sequential(
        AdaptiveAvgPool2d((1, 1)),
        Conv2d((1, 1), num_filters, strides=1, use_bias=False,
               activation=None),
        Upsampling2d(scale_factor=14, mode='bilinear', align_corners=False))