Example No. 1
def basic_rfb(num_filters, scale=0.1):
    return ShortCut2d(Sequential(
        ShortCut2d(
            Sequential(
                Conv2d_Block((1, 1), depth_multiplier=0.125, strides=1, groups=1, auto_pad=False, use_bias=False, activation=None,
                             normalization='batch'),
                Conv2d_Block((3, 3), depth_multiplier=2, strides=1, groups=1, padding=(1, 1), use_bias=False, activation='relu',
                             normalization='batch'),
                Conv2d_Block((3, 3), depth_multiplier=1, strides=1, groups=1, padding=(2, 2), use_bias=False, dilation=2,
                             activation=None, normalization='batch'), name='branch1'),

            Sequential(
                Conv2d_Block((1, 1), depth_multiplier=0.125, strides=1, groups=1, auto_pad=False, use_bias=False, activation=None,
                             normalization='batch'),
                Conv2d_Block((3, 3), depth_multiplier=2, strides=1, groups=1, padding=(1, 1), use_bias=False, activation='relu',
                             normalization='batch'),
                Conv2d_Block((3, 3), depth_multiplier=1, strides=1, groups=1, padding=(3, 3), use_bias=False, dilation=3,
                             activation=None, normalization='batch'), name='branch2'),
            Sequential(
                Conv2d_Block((1, 1), depth_multiplier=0.125, strides=1, groups=1, auto_pad=False, use_bias=False, activation=None,
                             normalization='batch'),
                Conv2d_Block((3, 3), depth_multiplier=1.5, strides=1, groups=1, padding=(1, 1), use_bias=False, activation='relu',
                             normalization='batch'),
                Conv2d_Block((3, 3), depth_multiplier=1.33, strides=1, groups=1, padding=(1, 1), use_bias=False,
                             activation='relu', normalization='batch'),
                Conv2d_Block((3, 3), depth_multiplier=1, strides=1, groups=1, padding=(5, 5), use_bias=False, dilation=5,
                             activation=None, normalization='batch'), name='branch3')
            , mode='concate'),

        Conv2d_Block((1, 1), num_filters=num_filters, strides=1, groups=1, auto_pad=True, use_bias=False,
                     activation=None, normalization='batch')),
        Conv2d_Block((1, 1), num_filters=num_filters, strides=1, groups=1, auto_pad=True, use_bias=False,
                     activation=None, normalization='batch'), mode='add', activation='relu')
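basic_rfb builds a receptive-field block: three 1x1-reduced branches with dilation rates 2, 3, and 5 are concatenated, projected back to num_filters by a 1x1 block, and added to a parallel 1x1 shortcut before the final ReLU. A minimal usage sketch, assuming the same trident-style namespace as these examples (the stem layer and filter counts are illustrative only):

# Illustrative sketch only: insert the RFB module after a strided stem convolution.
stem = Sequential(
    Conv2d_Block((3, 3), num_filters=32, strides=2, auto_pad=True,
                 use_bias=False, activation='relu', normalization='batch'),
    basic_rfb(num_filters=32),
)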
Example No. 2
    def __init__(self, *args, axis=1, activation='relu'):
        """

        Parameters
        ----------
        layer_defs : object
        """
        super(ConcateBlock, self).__init__()
        self.activation = get_activation(activation)
        self.axis = axis
        self.has_identity = False
        for i in range(len(args)):
            arg = args[i]
            if isinstance(arg, (Layer, list, dict)):
                if isinstance(arg, list):
                    arg = Sequential(*arg)
                elif isinstance(arg, dict) and len(args) == 1:
                    for k, v in arg.items():
                        if isinstance(v, Identity):
                            self.has_identity = True
                            self.add_module('Identity', v)
                        else:
                            self.add_module(k, v)
                elif isinstance(arg, dict) and len(args) > 1:
                    raise ValueError(
                        'more than one dict argument is not supported.')
                elif isinstance(arg, Identity):
                    self.has_identity = True
                    self.add_module('Identity', arg)
                else:
                    self.add_module('branch{0}'.format(i + 1), arg)
        if len(self._modules) == 1 and not self.has_identity:
            self.add_module('Identity', Identity())
        self.to(self.device)
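As the constructor shows, branches may be passed as individual layers, as lists (each wrapped into a Sequential), or as a single dict of named branches. A minimal, hypothetical usage sketch in the same style, with illustrative layer shapes:

# Hypothetical usage of ConcateBlock; branch shapes and names are illustrative.
block = ConcateBlock(
    Conv2d((1, 1), 32, strides=1, auto_pad=True),        # registered as 'branch1'
    [Conv2d((3, 3), 32, strides=1, auto_pad=True),
     Conv2d((1, 1), 32, strides=1, auto_pad=True)],      # list -> Sequential, 'branch2'
    Identity(),                                          # identity skip branch
    axis=1)                                              # concatenate along the channel axis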
Example No. 3
    def __init__(self, backbond, input_shape=(3,224,224), atrous_rates=(6, 12, 18, 24), num_filters=256, classes=20):
        super(_DeeplabV3_plus, self).__init__()
        modules = list(backbond.children())
        low_level_idx = -1
        high_level_idx = -1
        for i in range(len(modules)):
            if low_level_idx < 0 and modules[i].output_shape[-1] == backbond.input_shape[-1] // 8:
                low_level_idx = i

            if high_level_idx < 0 and modules[i].output_shape[-1] == backbond.input_shape[-1] // 32:
                high_level_idx = i
                break
        self.num_filters = num_filters
        self.classes = classes
        self.atrous_rates = atrous_rates
        self.backbond1 = Sequential(*backbond[:low_level_idx])
        self.backbond2 = Sequential(*backbond[low_level_idx:high_level_idx])
        self.aspp = ASPP(atrous_rates=self.atrous_rates, num_filters=self.num_filters)
        self.low_level_conv = Conv2d_Block((1, 1), num_filters=int(48 * self.num_filters / 256), strides=1,
                                           use_bias=False, activation='leaky_relu', normalization='batch')
        self.decoder = Sequential(
            DepthwiseConv2d_Block((3, 3), depth_multiplier=0.5, strides=1, use_bias=False, activation='leaky_relu',
                                  normalization='batch', dropout_rate=0.5),
            DepthwiseConv2d_Block((3, 3), depth_multiplier=1, strides=1, use_bias=False, activation='leaky_relu',
                                  normalization='batch', dropout_rate=0.1),
            Conv2d((1, 1), num_filters=self.classes, strides=1, use_bias=False, activation=None),
            SoftMax()
        )
Example No. 4
    def _make_layer(block,
                    num_filters,
                    blocklayers,
                    strides=1,
                    dilate=False,
                    use_bias=use_bias,
                    layer_name=''):
        conv_shortcut = False
        if strides != 1 or block is bottleneck:
            conv_shortcut = True
        layers = []
        layers.append(
            block(num_filters=num_filters,
                  strides=strides,
                  expansion=4,
                  conv_shortcut=conv_shortcut,
                  use_bias=use_bias,
                  name=layer_name + '_0'))

        for k in range(1, blocklayers):
            layers.append(
                block(num_filters=num_filters,
                      strides=1,
                      expansion=4,
                      conv_shortcut=False,
                      use_bias=use_bias,
                      name=layer_name + '_{0}'.format(k)))

        laters_block = Sequential(*layers)
        laters_block.name = layer_name
        return laters_block
Example No. 5
def Block(dim,
          num_heads,
          mlp_ratio=4.,
          qkv_bias=False,
          qk_scale=None,
          drop=0.,
          attn_drop=0.,
          drop_path=0):
    mlp_hidden_dim = int(dim * mlp_ratio)
    return Sequential(
        ShortCut(Identity(),
                 Sequential(
                     LayerNorm(),
                     Attention(dim,
                               num_heads=num_heads,
                               qkv_bias=qkv_bias,
                               qk_scale=qk_scale,
                               attn_drop=attn_drop,
                               proj_drop=drop), DropPath()),
                 mode='add'),
        ShortCut(Identity(),
                 Sequential(
                     LayerNorm(),
                     Mlp(hidden_features=mlp_hidden_dim,
                         out_features=dim,
                         drop=drop), DropPath()),
                 mode='add'))
Example No. 6
    def __init__(self, *args, base_filters=16, num_classes=2, num_regressors=4, detection_threshold=0.7, nms_threshold=0.3, center_variance=0.1, size_variance=0.2,
                 name='tiny_mobile_rfbnet', **kwargs):
        """

        Parameters
        ----------
        layer_defs : object
        """
        super(RFBnet, self).__init__(name=name)
        self.base_filters = base_filters
        backbond = tiny_mobile_rfbnet(self.base_filters)
        self.backbond1 = Sequential(*backbond[:8], name='backbond1')
        self.backbond2 = Sequential(*backbond[8:11], name='backbond2')
        self.backbond3 = Sequential(*backbond[11:13], name='backbond3')
        self.detection_threshold = detection_threshold
        self.nms_threshold = nms_threshold
        self.variance = (center_variance, size_variance)

        self.num_classes = num_classes
        self.num_regressors = num_regressors
        self.register_buffer("priors", None)
        self.define_img_size(640)

        self.extra = Sequential(Conv2d((1, 1), num_filters=64, strides=1, activation='relu', use_bias=True),
                                DepthwiseConv2d((3, 3), depth_multiplier=1, strides=2, auto_pad=True, activation='relu',
                                                use_bias=True),
                                Conv2d((1, 1), num_filters=256, strides=1, activation=None, use_bias=True), Relu(), name='extra')
        self.regression_headers = ModuleList([Sequential(
            DepthwiseConv2d((3, 3), depth_multiplier=1, strides=1, auto_pad=True, activation='relu', use_bias=True),
            Conv2d((1, 1), num_filters=3 * self.num_regressors, strides=1, activation=None, use_bias=True)),

            Sequential(
                DepthwiseConv2d((3, 3), depth_multiplier=1, strides=1, auto_pad=True, activation='relu', use_bias=True),
                Conv2d((1, 1), num_filters=2 * self.num_regressors, strides=1, activation=None, use_bias=True)),

            Sequential(
                DepthwiseConv2d((3, 3), depth_multiplier=1, strides=1, auto_pad=True, activation='relu', use_bias=True),
                Conv2d((1, 1), num_filters=2 * self.num_regressors, strides=1, activation=None, use_bias=True)),

            Conv2d((3, 3), num_filters=3 * self.num_regressors, strides=1, auto_pad=True, activation=None), ], name='regression_headers')
        self.classification_headers = ModuleList([
            Sequential(
                DepthwiseConv2d((3, 3), depth_multiplier=1, strides=1, auto_pad=True, activation='relu', use_bias=True),
                Conv2d((1, 1), num_filters=3 * self.num_classes, strides=1, activation=None, use_bias=True)),

            Sequential(
                DepthwiseConv2d((3, 3), depth_multiplier=1, strides=1, auto_pad=True, activation='relu', use_bias=True),
                Conv2d((1, 1), num_filters=2 * self.num_classes, strides=1, activation=None, use_bias=True)),

            Sequential(
                DepthwiseConv2d((3, 3), depth_multiplier=1, strides=1, auto_pad=True, activation='relu', use_bias=True),
                Conv2d((1, 1), num_filters=2 * self.num_classes, strides=1, activation=None, use_bias=True)),

            Conv2d((3, 3), num_filters=3 * self.num_classes, strides=1, auto_pad=True, activation=None,
                   use_bias=True), ], name='classification_headers')
Example No. 7
def InceptionA(pool_features, name=None):
    return ShortCut2d(Conv2d_Block((1, 1),
                                   num_filters=64,
                                   use_bias=False,
                                   normalization='batch',
                                   activation='relu',
                                   name='branch1x1'),
                      Sequential(
                          Conv2d_Block((1, 1),
                                       num_filters=48,
                                       use_bias=False,
                                       normalization='batch',
                                       activation='relu',
                                       name='branch5x5_1'),
                          Conv2d_Block((5, 5),
                                       num_filters=64,
                                       auto_pad=True,
                                       use_bias=False,
                                       normalization='batch',
                                       activation='relu',
                                       name='branch5x5_2')),
                      Sequential(
                          Conv2d_Block((1, 1),
                                       num_filters=64,
                                       use_bias=False,
                                       normalization='batch',
                                       activation='relu',
                                       name='branch3x3dbl_1'),
                          Conv2d_Block((3, 3),
                                       num_filters=96,
                                       auto_pad=True,
                                       use_bias=False,
                                       normalization='batch',
                                       activation='relu',
                                       name='branch3x3dbl_2'),
                          Conv2d_Block((3, 3),
                                       num_filters=96,
                                       auto_pad=True,
                                       use_bias=False,
                                       normalization='batch',
                                       activation='relu',
                                       name='branch3x3dbl_3')),
                      Sequential(
                          AvgPool2d((3, 3), strides=1, auto_pad=True),
                          Conv2d_Block((1, 1),
                                       num_filters=pool_features,
                                       use_bias=False,
                                       normalization='batch',
                                       activation='relu',
                                       name='branch_pool')),
                      mode='concate',
                      name=name)
Example No. 8
    def build_discriminator():
        layers = []
        layers.append(
            Conv2d((5, 5), 32, strides=1, auto_pad=True, use_bias=False, activation=activation, name='first_layer'))
        layers.append(Conv2d_Block((3, 3), 64, strides=2, auto_pad=True, use_spectral=use_spectral, use_bias=False,
                                   activation=activation, normalization=discriminator_norm, name='second_layer'))
        filter = 64
        current_width = image_width // 2
        i = 0
        while current_width > 8:
            filter = filter * 2 if i % 2 == 1 else filter
            if discriminator_build_block == BuildBlockMode.base.value:
                layers.append(
                    Conv2d_Block((3, 3), num_filters=filter, strides=2, auto_pad=True, use_spectral=use_spectral, use_bias=False,
                                 activation=activation, normalization=discriminator_norm,
                                 name='base_block{0}'.format(i)))
            elif discriminator_build_block == BuildBlockMode.resnet.value:
                layers.extend(resnet_block(num_filters=filter, strides=2, activation=activation, use_spectral=use_spectral,
                                           normalization=discriminator_norm, name='resnet_block{0}'.format(i)))

            elif discriminator_build_block == BuildBlockMode.bottleneck.value:
                layers.append(
                    bottleneck_block(num_filters=filter, strides=2, reduce=2, activation=activation, use_spectral=use_spectral,
                                     normalization=discriminator_norm, name='bottleneck_block{0}'.format(i)))

            current_width = current_width // 2
            i = i + 1
        if use_self_attention:
            layers.insert(-2, SelfAttention(8, name='self_attention'))
        if use_dropout:
            layers.insert(-1, Dropout(0.5))
        layers.append(Conv2d_Block((3, 3), 128, strides=2, auto_pad=True, use_bias=False, activation='leaky_relu', use_spectral=use_spectral, normalization=discriminator_norm,
                                   name='last_conv'))
        layers.append(Flatten())
        if use_minibatch_discrimination:
            layers.append(MinibatchDiscriminationLayer(name='minibatch_dis'))
        layers.append(Dense(1, use_bias=False, name='fc'))
        layers.append(Sigmoid())
        dis = Sequential(layers, name='discriminator')
        out = dis(to_tensor(TensorShape([None, 3, image_width, image_width]).get_dummy_tensor()).to(get_device()))
        if use_spectral:
            new_layers = []
            for layer in dis:
                if isinstance(layer, Dense):
                    new_layers.append(torch.nn.utils.spectral_norm(layer))
                else:
                    new_layers.append(layer)
            return Sequential(new_layers, name='discriminator')
        else:
            return dis
Example No. 9
def InceptionD(name=None):
    return ShortCut2d(Sequential(
        Conv2d_Block((1, 1),
                     num_filters=192,
                     use_bias=False,
                     normalization='batch',
                     activation='relu',
                     name='branch3x3_1'),
        Conv2d_Block((3, 3),
                     num_filters=320,
                     strides=2,
                     use_bias=False,
                     normalization='batch',
                     activation='relu',
                     name='branch3x3_2'),
    ),
                      Sequential(
                          Conv2d_Block((1, 1),
                                       num_filters=192,
                                       use_bias=False,
                                       normalization='batch',
                                       activation='relu',
                                       name='branch7x7x3_1'),
                          Conv2d_Block((1, 7),
                                       num_filters=192,
                                       auto_pad=True,
                                       use_bias=False,
                                       normalization='batch',
                                       activation='relu',
                                       name='branch7x7x3_2'),
                          Conv2d_Block((7, 1),
                                       num_filters=192,
                                       auto_pad=True,
                                       use_bias=False,
                                       normalization='batch',
                                       activation='relu',
                                       name='branch7x7x3_3'),
                          Conv2d_Block((3, 3),
                                       num_filters=192,
                                       strides=2,
                                       use_bias=False,
                                       normalization='batch',
                                       activation='relu',
                                       name='branch7x7x3_4'),
                      ),
                      MaxPool2d((3, 3), strides=2),
                      mode='concate',
                      name=name)
Example No. 10
def InceptionB(name=None):
    return ShortCut2d(Conv2d_Block((3, 3),
                                   num_filters=384,
                                   strides=2,
                                   use_bias=False,
                                   normalization='batch',
                                   activation='relu'),
                      Sequential(
                          Conv2d_Block((1, 1),
                                       num_filters=64,
                                       use_bias=False,
                                       normalization='batch',
                                       activation='relu'),
                          Conv2d_Block((3, 3),
                                       num_filters=96,
                                       auto_pad=True,
                                       use_bias=False,
                                       normalization='batch',
                                       activation='relu'),
                          Conv2d_Block((3, 3),
                                       num_filters=96,
                                       strides=2,
                                       use_bias=False,
                                       normalization='batch',
                                       activation='relu')),
                      MaxPool2d((3, 3), strides=2),
                      mode='concate',
                      name=name)
Example No. 11
def BottleNeck_IR_SE(num_filters, strides, keep_filter=True):
    blocks = OrderedDict()
    blocks['res_layer'] = Sequential(BatchNorm2d(),
                                     Conv2d_Block(
                                         (3, 3),
                                         num_filters=num_filters,
                                         strides=1,
                                         auto_pad=True,
                                         use_bias=False,
                                         activation=PRelu(num_filters)),
                                     Conv2d_Block((3, 3),
                                                  num_filters,
                                                  strides=strides,
                                                  use_bias=False,
                                                  activation=None,
                                                  normalization='batch'),
                                     SqueezeExcite(num_filters // 16,
                                                   num_filters),
                                     name='res_layer')
    if keep_filter:
        blocks['shortcut_layer'] = MaxPool2d(1,
                                             strides=strides,
                                             name='shortcut_layer')

    else:
        blocks['shortcut_layer'] = Conv2d_Block((1, 1),
                                                num_filters,
                                                strides=strides,
                                                use_bias=False,
                                                activation=None,
                                                normalization='batch',
                                                name='shortcut_layer')
    return ShortCut2d(blocks, mode='add')
Example No. 12
def conv_dw(num_filters, strides):
    return Sequential(
        DepthwiseConv2d_Block((3, 3), depth_multiplier=1, strides=strides, use_bias=False, activation='relu',
                              normalization='batch'),
        Conv2d_Block((1, 1), num_filters=num_filters, strides=1, groups=1, auto_pad=True, use_bias=False,
                     activation='relu', normalization='batch'),
    )
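conv_dw is the usual depthwise-separable pair: a 3x3 depthwise convolution followed by a 1x1 pointwise projection, each with batch normalization and ReLU. A hedged sketch of how such blocks could be stacked into a small MobileNet-style stem, reusing only layers defined in these examples (the helper name tiny_backbone and the filter counts are illustrative):

# Illustrative sketch only: a tiny MobileNet-like stem built from conv_dw blocks.
def tiny_backbone(base_filters=32):
    return Sequential(
        Conv2d_Block((3, 3), num_filters=base_filters, strides=2, auto_pad=True,
                     use_bias=False, activation='relu', normalization='batch'),
        conv_dw(base_filters * 2, strides=1),
        conv_dw(base_filters * 4, strides=2),
        conv_dw(base_filters * 4, strides=1),
    )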
Example No. 13
def DenseLayer(growth_rate, name=''):
    """
    The basic normalization, convolution, and activation combination for a dense connection.

    Args:
        growth_rate (int): The growth rate regulates how much new information each layer contributes to the global state.
        name (str): Name of this dense layer.

    Returns:
        An instance of a dense layer.

    """
    items = OrderedDict()
    items['norm'] = BatchNorm2d()
    items['relu'] = Relu()
    items['conv1'] = Conv2d_Block((1, 1),
                                  4 * growth_rate,
                                  strides=1,
                                  activation='relu',
                                  auto_pad=True,
                                  padding_mode='zero',
                                  use_bias=False,
                                  normalization='batch')
    items['conv2'] = Conv2d((3, 3),
                            growth_rate,
                            strides=1,
                            auto_pad=True,
                            padding_mode='zero',
                            use_bias=False)
    return Sequential(items)
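Since each DenseLayer emits growth_rate new feature maps, a dense block concatenates every layer's output with its input, so the channel count grows by growth_rate per step. A hedged sketch of such a block, built only from the ShortCut2d / For idioms used in the other examples (the helper name DenseBlock is illustrative):

# Illustrative sketch only: a DenseNet-style block of `num_layers` DenseLayers.
# Each step concatenates the incoming features (Identity branch) with the
# growth_rate channels produced by DenseLayer.
def DenseBlock(num_layers, growth_rate, name=''):
    return For(range(num_layers), lambda i: ShortCut2d(
        Identity(),
        DenseLayer(growth_rate, name='{0}_denselayer{1}'.format(name, i + 1)),
        mode='concate'))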
Example No. 14
def DeepLabHead(classes=20, atrous_rates=(6, 12, 18, 24), num_filters=256):
    return Sequential(
        ASPP(atrous_rates, num_filters=num_filters),
        Conv2d_Block((3, 3), num_filters, auto_pad=True, use_bias=False, activation='relu', normalization='batch'),
        Conv2d((1, 1), num_filters=classes, strides=1, auto_pad=True, activation=None, name='classifier'),
        SoftMax()
    )
Example No. 15
def VisionTransformer(patch_size=16,
                      num_classes=1000,
                      depth=12,
                      num_heads=12,
                      mlp_ratio=4.,
                      qkv_bias=True,
                      qk_scale=None,
                      representation_size=None,
                      drop_rate=0.,
                      attn_drop_rate=0.,
                      drop_path_rate=0.,
                      hybrid_backbone=None):

    embed_dim = patch_size * patch_size * 3
    representation_size = embed_dim
    dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]
    return Sequential(
        ImageToPatch(patch_size=patch_size),
        PositionEmbed(drop_rate=0, mode='trainable'),
        For(
            range(depth), lambda i: Block(dim=embed_dim,
                                          num_heads=num_heads,
                                          mlp_ratio=mlp_ratio,
                                          qkv_bias=qkv_bias,
                                          qk_scale=qk_scale,
                                          drop=drop_rate,
                                          attn_drop=attn_drop_rate,
                                          drop_path=dpr[i])), LayerNorm(),
        Aggregation(mode='mean', axis=1, keepdims=False),
        Dense(num_filters=num_classes, activation=None), SoftMax())
Example No. 16
def ASPP(atrous_rates=(6, 12, 18), num_filters=256):
    layers = OrderedDict()
    layers['conv1'] = Conv2d_Block((1, 1),
                                   num_filters=num_filters,
                                   strides=1,
                                   use_bias=False,
                                   activation=None,
                                   normalization='batch')
    for i in range(len(atrous_rates)):
        layers['aspp_dilation{0}'.format(i)] = Conv2d_Block(
            (3, 3),
            num_filters=num_filters,
            strides=1,
            use_bias=False,
            activation=None,
            normalization='batch',
            dilation=atrous_rates[i])
    layers['aspp_pooling'] = ASPPPooling(num_filters)
    return Sequential(
        ShortCut2d(layers, mode='concate'),
        Conv2d_Block((1, 1),
                     num_filters,
                     strides=1,
                     use_bias=False,
                     activation='relu',
                     normalization='batch',
                     dilation=1,
                     dropout_rate=0.5,
                     name='project'))
Example No. 17
def TransformerGenerator(patch_size=8,
                         depth=5,
                         num_heads=4,
                         mlp_ratio=4.,
                         qkv_bias=False,
                         qk_scale=None,
                         representation_size=None,
                         drop_rate=0.,
                         attn_drop_rate=0.,
                         drop_path_rate=0.,
                         hybrid_backbone=None):
    bottom_width = 2
    current_width = 2
    embed_dim = patch_size * patch_size * 3
    representation_size = embed_dim
    dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]
    return Sequential(
        Dense(num_filters=bottom_width * bottom_width * embed_dim),
        Reshape((bottom_width * bottom_width, embed_dim)),
        PositionEmbed(drop_rate=0, mode='trainable', use_cls_token=False),
        For(
            range(depth), lambda i: Block(dim=embed_dim,
                                          num_heads=num_heads,
                                          mlp_ratio=mlp_ratio,
                                          qkv_bias=qkv_bias,
                                          qk_scale=qk_scale,
                                          drop=drop_rate,
                                          attn_drop=attn_drop_rate,
                                          drop_path=dpr[i])),
        For(
            range(3), lambda i: Sequential(
                TransformerUpsampling(scale_factor=2),
                PositionEmbed(drop_rate=0,
                              mode='trainable',
                              use_cls_token=False),
                For(
                    range(3), lambda n: Block(dim=embed_dim,
                                              num_heads=num_heads,
                                              mlp_ratio=mlp_ratio,
                                              qkv_bias=qkv_bias,
                                              qk_scale=qk_scale,
                                              drop=drop_rate,
                                              attn_drop=attn_drop_rate,
                                              drop_path=0)))), Tanh(),
        Permute(0, 2, 1),
        Reshape((embed_dim, current_width * 8, current_width * 8)),
        PatchToImage(patch_size=patch_size))
Example No. 18
def resblock_body(num_filters,
                  num_blocks,
                  all_narrow=True,
                  keep_output=False,
                  name=''):
    return Sequential(
        DarknetConv2D_BN_Mish((3, 3),
                              num_filters,
                              strides=(2, 2),
                              auto_pad=False,
                              padding=((1, 0), (1, 0)),
                              name=name + '_preconv1'),
        ShortCut2d(
            {
                1:
                DarknetConv2D_BN_Mish(
                    (1, 1),
                    num_filters // 2 if all_narrow else num_filters,
                    name=name + '_shortconv'),
                0:
                Sequential(
                    DarknetConv2D_BN_Mish(
                        (1, 1),
                        num_filters // 2 if all_narrow else num_filters,
                        name=name + '_mainconv'),
                    For(
                        range(num_blocks), lambda i: ShortCut2d(
                            Identity(),
                            Sequential(
                                DarknetConv2D_BN_Mish(
                                    (1, 1),
                                    num_filters // 2,
                                    name=name + '_for{0}_1'.format(i)),
                                DarknetConv2D_BN_Mish(
                                    (3, 3),
                                    num_filters // 2
                                    if all_narrow else num_filters,
                                    name=name + '_for{0}_2'.format(i))),
                            mode='add')),
                    DarknetConv2D_BN_Mish(
                        (1, 1),
                        num_filters // 2 if all_narrow else num_filters,
                        name=name + '_postconv'))
            },
            mode='concate',
            name=name + '_route'),
        DarknetConv2D_BN_Mish((1, 1), num_filters, name=name + '_convblock5'))
Example No. 19
def basic_block(num_filters=64, base_width=64, strides=1, expansion=4, conv_shortcut=False, use_bias=False, name=''):
    shortcut = Identity()
    if strides > 1 or conv_shortcut is True:
        shortcut = Conv2d_Block((1, 1), num_filters=num_filters, strides=strides, auto_pad=True, padding_mode='zero',
                                normalization='batch', activation=None, use_bias=use_bias)

    return ShortCut2d(Sequential(
        Conv2d_Block((3, 3), num_filters=num_filters, strides=strides, auto_pad=True, padding_mode='zero',
                     normalization='batch', activation='relu', use_bias=use_bias),
        Conv2d_Block((3, 3), num_filters=num_filters, strides=1, auto_pad=True, padding_mode='zero',
                     normalization='batch', activation=None, use_bias=use_bias)),
        shortcut, activation='relu')
Example No. 20
def se_bottleneck(num_filters=64,
                  strides=1,
                  expansion=4,
                  conv_shortcut=True,
                  use_bias=False,
                  name=''):
    # width = int(num_filters * (base_width / 64.)) * 1  # groups
    shortcut = Identity()
    shortcut_name = 'Identity'
    if strides > 1 or conv_shortcut is True:
        shortcut = Conv2d_Block((1, 1),
                                num_filters=num_filters * expansion,
                                strides=strides,
                                auto_pad=True,
                                padding_mode='zero',
                                normalization='batch',
                                activation=None,
                                use_bias=use_bias,
                                name='downsample')
        shortcut_name = 'downsample'
    return ShortCut2d(
        {
            'branch1':
            Sequential(
                Conv2d_Block((1, 1),
                             num_filters=num_filters,
                             strides=strides,
                             auto_pad=True,
                             padding_mode='zero',
                             normalization='batch',
                             activation='relu',
                             use_bias=use_bias,
                             name='conv1'),
                Conv2d_Block((3, 3),
                             num_filters=num_filters,
                             strides=1,
                             auto_pad=True,
                             padding_mode='zero',
                             normalization='batch',
                             activation='relu',
                             use_bias=use_bias,
                             name='conv2'),
                Conv2d_Block((1, 1),
                             num_filters=num_filters * expansion,
                             strides=1,
                             auto_pad=True,
                             padding_mode='zero',
                             normalization='batch',
                             activation=None,
                             use_bias=use_bias,
                             name='conv3'),
                SqueezeExcite(se_filters=num_filters // expansion,
                              num_filters=num_filters * expansion,
                              use_bias=True)),
            shortcut_name:
            shortcut
        },
        activation='relu')
Example No. 21
def darknet_body():
    return Sequential(
        DarknetConv2D_BN_Mish((3, 3), 32),
        resblock_body(64, 1, all_narrow=False),
        resblock_body(128, 2),
        resblock_body(256, 8),
        resblock_body(512, 8),
        resblock_body(1024, 4)
    )
Example No. 22
def DeeplabV3(backbond,
              input_shape=(3, 224, 224),
              classes=20,
              **kwargs):
    input_shape = tuple(input_shape)
    deeplab = Sequential(name='deeplabv3')

    deeplab.add_module('backbond', backbond)
    deeplab.add_module('classifier', DeepLabHead(classes=classes, num_filters=128))
    deeplab.add_module('upsample', Upsampling2d(scale_factor=16, mode='bilinear', align_corners=False))
    model = ImageSegmentationModel(input_shape=input_shape, output=deeplab)
    return model
Example No. 23
def Mlp(hidden_features=None, out_features=None, drop=0):
    return Sequential(
        FullConnect_Block(num_filters=hidden_features,
                          activation=Gelu(),
                          dropout_rate=drop,
                          normalization=None),
        FullConnect_Block(num_filters=out_features,
                          activation=Gelu(),
                          dropout_rate=drop,
                          normalization=None),
    )
Example No. 24
def separable_resnet_block(num_filters=64, strides=1, activation='leaky_relu', normalization='batch',
                           use_spectral=False, dilation=1, name=''):
    kernel = 1 if strides == 1 else 3
    return [ShortCut2d(Sequential(
        SeparableConv2d_Block((3, 3), depth_multiplier=1, strides=1, auto_pad=True, padding_mode='replicate',
                              use_spectral=use_spectral, normalization=normalization, activation=activation,
                              use_bias=False, dilation=dilation, name=name + '_0_conv'),
        Conv2d_Block((1, 1), depth_multiplier=2, strides=1, auto_pad=True, padding_mode='replicate',
                     use_spectral=use_spectral, normalization=normalization, activation=activation, use_bias=False,
                     name=name + '_1_conv')), Identity(), activation=activation, name=name),
        Conv2d_Block((kernel, kernel), num_filters=num_filters, strides=strides, auto_pad=True,
                     padding_mode='replicate', use_spectral=use_spectral, normalization=normalization,
                     activation=activation, use_bias=False, name=name + '_conv')]
Example No. 25
def bottleneck_block(num_filters=64,
                     strides=1,
                     reduce=4,
                     activation='leaky_relu',
                     normalization='batch',
                     use_spectral=False,
                     dilation=1,
                     name=''):
    shortcut = Conv2d_Block((3, 3),
                            num_filters=num_filters,
                            strides=strides,
                            auto_pad=True,
                            padding_mode='zero',
                            normalization=normalization,
                            activation=None,
                            name=name + '_downsample')
    return ShortCut2d(Sequential(
        Conv2d_Block((1, 1),
                     depth_multiplier=1,
                     strides=1,
                     auto_pad=True,
                     padding_mode='replicate',
                     use_spectral=use_spectral,
                     normalization=normalization,
                     activation=activation,
                     use_bias=False,
                     name=name + '_0_conv'),
        Conv2d_Block((3, 3),
                     depth_multiplier=1 / reduce,
                     strides=strides,
                     auto_pad=True,
                     padding_mode='replicate',
                     use_spectral=use_spectral,
                     normalization=normalization,
                     activation=activation,
                     use_bias=False,
                     dilation=dilation,
                     name=name + '_1_conv'),
        Conv2d_Block((1, 1),
                     num_filters=num_filters,
                     strides=1,
                     auto_pad=True,
                     padding_mode='replicate',
                     use_spectral=use_spectral,
                     normalization=normalization,
                     activation=None,
                     use_bias=False,
                     name=name + '_2_conv')),
                      shortcut,
                      activation=activation,
                      name=name)
Example No. 26
def SEResNet_IR(include_top=True, num_layers=50, Bottleneck=BottleNeck_IR_SE, drop_ratio=0.4,
                feature_dim=128, input_shape=(3, 112, 112)):
    blocks = OrderedDict()
    blocks['input_layer'] = Conv2d_Block((3, 3), 64, strides=1, auto_pad=True, use_bias=False,
                                         activation=PRelu(64), normalization='batch', name='input_layer')
    blocks['body'] = Sequential(
        get_block(Bottleneck, out_channel=64, num_units=3, keep_filter=True) +
        get_block(Bottleneck, out_channel=128, num_units=4, keep_filter=False) +
        get_block(Bottleneck, out_channel=256, num_units=14, keep_filter=False) +
        get_block(Bottleneck, out_channel=512, num_units=3, keep_filter=False)
    )
    blocks['output_layer'] = Sequential(
        BatchNorm2d(),
        Dropout(drop_ratio),
        Flatten(),
        Dense(feature_dim),
        BatchNorm(),
        name='output_layer'
    )
    facenet = Sequential(blocks).to(_device)
    facenet.name = camel2snake('SEResNet_IR')
    model = FaceRecognitionModel(input_shape=input_shape, output=facenet)
    model.preprocess_flow = [Resize((input_shape[1], input_shape[2]), keep_aspect=True), Normalize(0, 255),
                             Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]
    # model.summary()
    return model
Example No. 27
def p_net():
    return Sequential(Conv2d((3, 3),
                             10,
                             strides=1,
                             auto_pad=False,
                             use_bias=True,
                             name='conv1'),
                      PRelu(num_parameters=1),
                      MaxPool2d((2, 2), strides=2, auto_pad=False),
                      Conv2d((3, 3),
                             16,
                             strides=1,
                             auto_pad=False,
                             use_bias=True,
                             name='conv2'),
                      PRelu(num_parameters=1),
                      Conv2d((3, 3),
                             32,
                             strides=1,
                             auto_pad=False,
                             use_bias=True,
                             name='conv3'),
                      PRelu(num_parameters=1),
                      ModuleDict(
                          {
                              'conv4_1':
                              Conv2d((1, 1),
                                     1,
                                     strides=1,
                                     auto_pad=False,
                                     use_bias=True,
                                     activation='sigmoid',
                                     name='conv4_1'),
                              'conv4_2':
                              Conv2d((1, 1),
                                     4,
                                     strides=1,
                                     auto_pad=False,
                                     use_bias=True,
                                     name='conv4_2'),
                              'conv4_3':
                              Conv2d((1, 1),
                                     10,
                                     strides=1,
                                     auto_pad=False,
                                     use_bias=True,
                                     name='conv4_3')
                          },
                          is_multicasting=True),
                      name='pnet')
Example No. 28
def InceptionAux(num_classes=1000, name=None):
    return Sequential(
        AvgPool2d((5, 5), strides=3),
        Conv2d_Block((1, 1),
                     num_filters=128,
                     use_bias=False,
                     normalization='batch',
                     activation='relu'),
        Conv2d_Block((5, 5),
                     num_filters=768,
                     use_bias=False,
                     normalization='batch',
                     activation='relu'), AdaptiveAvgPool2d((1, 1)), Flatten(),
        Dense(num_classes))
Example No. 29
def inverted_residual(in_filters,
                      num_filters=64,
                      strides=1,
                      expansion=4,
                      name=''):
    mid_filters = int(round(in_filters * expansion))
    layers = []
    if expansion != 1:
        layers.append(
            Conv2d_Block((1, 1),
                         num_filters=mid_filters,
                         strides=1,
                         auto_pad=True,
                         padding_mode='zero',
                         normalization='batch',
                         activation='relu6'))

    layers.append(
        DepthwiseConv2d_Block((3, 3),
                              depth_multiplier=1,
                              strides=strides,
                              auto_pad=True,
                              padding_mode='zero',
                              normalization='batch',
                              activation='relu6'))
    layers.append(
        Conv2d_Block((1, 1),
                     num_filters=num_filters,
                     strides=1,
                     auto_pad=False,
                     padding_mode='zero',
                     normalization='batch',
                     activation=None))
    if strides == 1 and in_filters == num_filters:
        return ShortCut2d(Sequential(*layers), Identity(), activation=None)
    else:
        return Sequential(*layers)

def o_net():
    return Sequential(Conv2d((3, 3),
                             32,
                             strides=1,
                             auto_pad=False,
                             use_bias=True,
                             name='conv1'),
                      PRelu(num_parameters=1),
                      MaxPool2d((3, 3), strides=2, auto_pad=False),
                      Conv2d((3, 3),
                             64,
                             strides=1,
                             auto_pad=False,
                             use_bias=True,
                             name='conv2'),
                      PRelu(num_parameters=1),
                      MaxPool2d((3, 3), strides=2, auto_pad=False),
                      Conv2d((3, 3),
                             64,
                             strides=1,
                             auto_pad=False,
                             use_bias=True,
                             name='conv3'),
                      PRelu(num_parameters=1),
                      MaxPool2d((2, 2), strides=2, auto_pad=False),
                      Conv2d((2, 2),
                             128,
                             strides=1,
                             auto_pad=False,
                             use_bias=True,
                             name='conv4'),
                      PRelu(num_parameters=1),
                      Flatten(),
                      Dense(256, activation=None, use_bias=True, name='conv5'),
                      PRelu(num_parameters=1),
                      Combine(
                          Dense(1,
                                activation='sigmoid',
                                use_bias=True,
                                name='conv6_1'),
                          Dense(4,
                                activation=None,
                                use_bias=True,
                                name='conv6_2'),
                          Dense(10,
                                activation=None,
                                use_bias=True,
                                name='conv6_3')),
                      name='onet')