Example 1
    def __init__(self,
                 in_channels,
                 out_channels,
                 multi_scale_output=True,
                 name=None,
                 align_corners=False):
        super(FuseLayers, self).__init__()

        self._actual_ch = len(in_channels) if multi_scale_output else 1
        self._in_channels = in_channels
        self.align_corners = align_corners

        self.residual_func_list = []
        for i in range(self._actual_ch):
            for j in range(len(in_channels)):
                if j > i:
                    residual_func = self.add_sublayer(
                        "residual_{}_layer_{}_{}".format(name, i + 1, j + 1),
                        layers.ConvBN(
                            in_channels=in_channels[j],
                            out_channels=out_channels[i],
                            kernel_size=1,
                            padding='same',
                            bias_attr=False))
                    self.residual_func_list.append(residual_func)
                elif j < i:
                    pre_num_filters = in_channels[j]
                    for k in range(i - j):
                        if k == i - j - 1:
                            residual_func = self.add_sublayer(
                                "residual_{}_layer_{}_{}_{}".format(
                                    name, i + 1, j + 1, k + 1),
                                layers.ConvBN(
                                    in_channels=pre_num_filters,
                                    out_channels=out_channels[i],
                                    kernel_size=3,
                                    stride=2,
                                    padding='same',
                                    bias_attr=False))
                            pre_num_filters = out_channels[i]
                        else:
                            residual_func = self.add_sublayer(
                                "residual_{}_layer_{}_{}_{}".format(
                                    name, i + 1, j + 1, k + 1),
                                layers.ConvBNReLU(
                                    in_channels=pre_num_filters,
                                    out_channels=out_channels[j],
                                    kernel_size=3,
                                    stride=2,
                                    padding='same',
                                    bias_attr=False))
                            pre_num_filters = out_channels[j]
                        self.residual_func_list.append(residual_func)
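The j > i branches above only adjust channels with a 1x1 ConvBN; the spatial upsampling happens later, in forward(). Below is a minimal standalone sketch of how one such branch is typically applied (a hypothetical illustration, not the module's actual forward; it assumes paddleseg's layers.ConvBN and paddle's bilinear interpolate):

import paddle
import paddle.nn.functional as F
from paddleseg.models import layers

# 1x1 ConvBN matches channel counts; bilinear upsampling then matches
# spatial size before the element-wise fuse.
conv = layers.ConvBN(in_channels=64, out_channels=32, kernel_size=1,
                     bias_attr=False)
low_res = paddle.rand([1, 64, 16, 16])   # branch j (lower resolution)
high_res = paddle.rand([1, 32, 64, 64])  # branch i (fusion target)
fused = high_res + F.interpolate(conv(low_res), size=high_res.shape[2:],
                                 mode='bilinear', align_corners=False)
print(fused.shape)  # [1, 32, 64, 64]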
Example 2
    def __init__(self, in_dim, out_dim, expand):
        super().__init__()

        expand_dim = expand * in_dim

        self.branch_1 = nn.Sequential(
            layers.ConvBNReLU(in_dim, in_dim, 3),
            layers.DepthwiseConvBN(in_dim, expand_dim, 3, stride=2),
            layers.DepthwiseConvBN(expand_dim, expand_dim, 3),
            layers.ConvBN(expand_dim, out_dim, 1))

        self.branch_2 = nn.Sequential(
            layers.DepthwiseConvBN(in_dim, in_dim, 3, stride=2),
            layers.ConvBN(in_dim, out_dim, 1))
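This is the stride-2 gather-and-expansion pattern from BiSeNetV2: both branches halve the spatial size and end in out_dim channels, so they can be summed. The forward() is not part of the excerpt; a hedged sketch of the usual completion:

    # Hypothetical forward for the block above (BiSeNetV2-style fusion);
    # assumes `import paddle.nn.functional as F` at module level.
    def forward(self, x):
        # Both branches produce [N, out_dim, H/2, W/2] feature maps,
        # so an element-wise add followed by a ReLU fuses them.
        return F.relu(self.branch_1(x) + self.branch_2(x))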
Example 3
    def __init__(self, high_in_channels, low_in_channels, out_channels):
        super().__init__()

        # Depth-wise conv (note: groups is hard-coded to 128, so this is
        # truly depth-wise only when out_channels == 128)
        self.dwconv = layers.ConvBNReLU(
            in_channels=low_in_channels,
            out_channels=out_channels,
            kernel_size=3,
            padding=1,
            groups=128,
            bias_attr=False)

        self.conv_low_res = layers.ConvBN(out_channels, out_channels, 1)
        self.conv_high_res = layers.ConvBN(high_in_channels, out_channels, 1)
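For context, a hedged sketch of how a Fast-SCNN style feature-fusion module typically completes this __init__ (the real forward() is not part of the excerpt; the interpolation scale and final activation are assumptions):

    # Hypothetical forward following the Fast-SCNN fusion pattern;
    # assumes `import paddle.nn.functional as F` at module level.
    def forward(self, high_res_input, low_res_input):
        # Upsample the low-resolution path, refine it, project both
        # paths to out_channels, then fuse with an element-wise add.
        low = F.interpolate(low_res_input, scale_factor=4,
                            mode='bilinear', align_corners=False)
        low = self.conv_low_res(self.dwconv(low))
        high = self.conv_high_res(high_res_input)
        return F.relu(low + high)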
Example 4
    def __init__(self,
                 num_channels,
                 num_filters,
                 has_se,
                 stride=1,
                 downsample=False,
                 name=None):
        super(BottleneckBlock, self).__init__()

        self.has_se = has_se
        self.downsample = downsample

        self.conv1 = layers.ConvBNReLU(
            in_channels=num_channels,
            out_channels=num_filters,
            kernel_size=1,
            padding='same',
            bias_attr=False)

        self.conv2 = layers.ConvBNReLU(
            in_channels=num_filters,
            out_channels=num_filters,
            kernel_size=3,
            stride=stride,
            padding='same',
            bias_attr=False)

        self.conv3 = layers.ConvBN(
            in_channels=num_filters,
            out_channels=num_filters * 4,
            kernel_size=1,
            padding='same',
            bias_attr=False)

        if self.downsample:
            self.conv_down = layers.ConvBN(
                in_channels=num_channels,
                out_channels=num_filters * 4,
                kernel_size=1,
                padding='same',
                bias_attr=False)

        if self.has_se:
            self.se = SELayer(
                num_channels=num_filters * 4,
                num_filters=num_filters * 4,
                reduction_ratio=16,
                name=name + '_fc')
Example 5
    def __init__(self, in_channels, out_channels, expansion=6, stride=2):
        super().__init__()

        self.use_shortcut = stride == 1 and in_channels == out_channels

        expand_channels = in_channels * expansion
        self.block = nn.Sequential(
            # pw
            layers.ConvBNReLU(
                in_channels=in_channels,
                out_channels=expand_channels,
                kernel_size=1,
                bias_attr=False),
            # dw
            layers.ConvBNReLU(
                in_channels=expand_channels,
                out_channels=expand_channels,
                kernel_size=3,
                stride=stride,
                padding=1,
                groups=expand_channels,
                bias_attr=False),
            # pw-linear
            layers.ConvBN(
                in_channels=expand_channels,
                out_channels=out_channels,
                kernel_size=1,
                bias_attr=False))
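The use_shortcut flag encodes the MobileNetV2 rule that an identity shortcut is valid only when the block neither downsamples nor changes channel count. A hedged sketch of the customary forward() (not part of the excerpt):

    # Hypothetical forward for the inverted-residual block above.
    def forward(self, x):
        out = self.block(x)
        if self.use_shortcut:
            # stride == 1 and in_channels == out_channels here, so the
            # shapes match and the identity add is well-defined.
            out = x + out
        return out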
Example 6
    def __init__(self, in_dim, out_dim, expand):
        super().__init__()

        expand_dim = expand * in_dim

        self.conv = nn.Sequential(
            layers.ConvBNReLU(in_dim, in_dim, 3),
            layers.DepthwiseConvBN(in_dim, expand_dim, 3),
            layers.ConvBN(expand_dim, out_dim, 1))
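This is the stride-1 counterpart of Example 2. Since nothing downsamples, the usual completion is an identity shortcut plus ReLU; a hedged sketch (valid only when out_dim == in_dim, which the identity add requires):

    # Hypothetical forward (BiSeNetV2-style); assumes
    # `import paddle.nn.functional as F` and out_dim == in_dim.
    def forward(self, x):
        return F.relu(self.conv(x) + x)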
Example 7
    def __init__(self, out_dim):
        super().__init__()

        self.db_branch_keep = nn.Sequential(
            layers.DepthwiseConvBN(out_dim, out_dim, 3),
            nn.Conv2D(out_dim, out_dim, 1))

        self.db_branch_down = nn.Sequential(
            layers.ConvBN(out_dim, out_dim, 3, stride=2),
            nn.AvgPool2D(kernel_size=3, stride=2, padding=1))

        self.sb_branch_keep = nn.Sequential(
            layers.DepthwiseConvBN(out_dim, out_dim, 3),
            nn.Conv2D(out_dim, out_dim, 1), layers.Activation(act='sigmoid'))

        self.sb_branch_up = layers.ConvBN(out_dim, out_dim, 3)

        self.conv = layers.ConvBN(out_dim, out_dim, 3)
Example 8
    def __init__(self,
                 num_channels,
                 num_filters,
                 stride=1,
                 has_se=False,
                 downsample=False,
                 name=None,
                 padding_same=True):
        super(BasicBlock, self).__init__()

        self.has_se = has_se
        self.downsample = downsample

        self.conv1 = layers.ConvBNReLU(
            in_channels=num_channels,
            out_channels=num_filters,
            kernel_size=3,
            stride=stride,
            padding=1 if not padding_same else 'same',
            bias_attr=False)
        self.conv2 = layers.ConvBN(in_channels=num_filters,
                                   out_channels=num_filters,
                                   kernel_size=3,
                                   padding=1 if not padding_same else 'same',
                                   bias_attr=False)

        if self.downsample:
            self.conv_down = layers.ConvBNReLU(in_channels=num_channels,
                                               out_channels=num_filters,
                                               kernel_size=1,
                                               bias_attr=False)

        if self.has_se:
            self.se = SELayer(num_channels=num_filters,
                              num_filters=num_filters,
                              reduction_ratio=16,
                              name=name + '_fc')

        self.add = layers.Add()
        self.relu = layers.Activation("relu")
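With self.add and self.relu defined, the residual wiring is easy to reconstruct. A hedged sketch of the standard HRNet-style forward() for this block (not included in the excerpt):

    # Hypothetical forward for the basic residual block above.
    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = self.conv2(out)
        if self.downsample:
            residual = self.conv_down(x)  # project to num_filters channels
        if self.has_se:
            out = self.se(out)
        out = self.add(out, residual)
        return self.relu(out)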
Example 9
    def __init__(self,
                 low_in_channels,
                 high_in_channels,
                 out_channels,
                 key_channels,
                 value_channels,
                 dropout_prob,
                 repeat_sizes=[1],
                 psp_size=(1, 3, 6, 8)):
        super().__init__()

        self.psp_size = psp_size
        self.stages = nn.LayerList([
            SelfAttentionBlock_AFNB(low_in_channels, high_in_channels,
                                    key_channels, value_channels, out_channels,
                                    size) for size in repeat_sizes
        ])
        self.conv_bn = layers.ConvBN(
            in_channels=out_channels + high_in_channels,
            out_channels=out_channels,
            kernel_size=1)
        self.dropout = nn.Dropout(p=dropout_prob)
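The in_channels of conv_bn (out_channels + high_in_channels) reveals the fusion scheme: the attention stages' output is concatenated with the high-level input. A hedged sketch of the corresponding forward() (ANN/AFNB style; not shown above):

    # Hypothetical forward for the fusion head above; assumes
    # `import paddle` at module level.
    def forward(self, low_feats, high_feats):
        context = self.stages[0](low_feats, high_feats)
        for stage in self.stages[1:]:
            context += stage(low_feats, high_feats)
        # The concatenation explains the out_channels + high_in_channels
        # input width of conv_bn.
        out = self.conv_bn(paddle.concat([context, high_feats], axis=1))
        return self.dropout(out)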
Example 10
    def __init__(self,
                 in_channels,
                 out_channels,
                 branches=4,
                 kernel_size_maximum=9,
                 shortcut=True):
        super().__init__()
        if out_channels < in_channels:
            raise RuntimeError(
                "The out_channels for DownSampler should be bigger than in_channels, but got in_channels={}, out_channels={}"
                .format(in_channels, out_channels))
        self.eesp = EESP(in_channels,
                         out_channels - in_channels,
                         stride=2,
                         branches=branches,
                         kernel_size_maximum=kernel_size_maximum,
                         down_method='avg')
        self.avg = nn.AvgPool2D(kernel_size=3, padding=1, stride=2)
        if shortcut:
            self.shortcut_layer = nn.Sequential(
                layers.ConvBNPReLU(3, 3, 3, stride=1, bias_attr=False),
                layers.ConvBN(3, out_channels, 1, stride=1, bias_attr=False),
            )
        self._act = nn.PReLU()
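The channel arithmetic is the point here: the EESP branch emits out_channels - in_channels maps and the average-pool branch keeps in_channels, so concatenating them restores exactly out_channels. A hedged sketch of an ESPNetV2-style forward() (the hard-coded 3s in shortcut_layer match the raw RGB image it consumes; resizing the image down to the pooled resolution is elided):

    # Hypothetical forward for the downsampler above; assumes
    # `import paddle` at module level and that `image` has already
    # been pooled to the same spatial size as `out`.
    def forward(self, x, image=None):
        out = paddle.concat([self.avg(x), self.eesp(x)], axis=1)
        if image is not None:
            out = out + self.shortcut_layer(image)
        return self._act(out)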
Example 11
    def __init__(self,
                 num_classes,
                 in_channels,
                 backbone,
                 mlahead_channels=128,
                 aux_channels=256,
                 norm_layer=nn.BatchNorm2D,
                 pretrained=None,
                 **kwargs):
        super(MLATransformer, self).__init__()

        self.BatchNorm = norm_layer
        self.mlahead_channels = mlahead_channels
        self.num_classes = num_classes
        self.in_channels = in_channels
        self.backbone = backbone

        self.mlahead = MLAHeads(mlahead_channels=self.mlahead_channels)
        self.cls = nn.Conv2D(4 * self.mlahead_channels,
                             self.num_classes,
                             3,
                             padding=1)

        self.conv0 = layers.ConvBNReLU(self.in_channels[0],
                                       self.in_channels[0] * 2,
                                       3,
                                       padding=1,
                                       bias_attr=False)
        self.conv1 = layers.ConvBNReLU(self.in_channels[1],
                                       self.in_channels[1],
                                       3,
                                       padding=1,
                                       bias_attr=False)
        self.conv21 = layers.ConvBNReLU(self.in_channels[2],
                                        self.in_channels[2],
                                        3,
                                        padding=1,
                                        bias_attr=False)
        self.conv22 = layers.ConvBNReLU(self.in_channels[2],
                                        self.in_channels[2] // 2,
                                        3,
                                        padding=1,
                                        bias_attr=False)
        self.conv31 = layers.ConvBNReLU(self.in_channels[3],
                                        self.in_channels[3],
                                        3,
                                        padding=1,
                                        bias_attr=False)
        self.conv32 = layers.ConvBNReLU(self.in_channels[3],
                                        self.in_channels[3] // 2,
                                        3,
                                        padding=1,
                                        bias_attr=False)
        self.conv33 = layers.ConvBNReLU(self.in_channels[3] // 2,
                                        self.in_channels[3] // 4,
                                        3,
                                        padding=1,
                                        bias_attr=False)

        self.aux_head = nn.Sequential(
            layers.ConvBN(in_channels=self.in_channels[2],
                          out_channels=aux_channels,
                          kernel_size=3,
                          padding=1,
                          bias_attr=False),
            nn.Conv2D(
                in_channels=aux_channels,
                out_channels=self.num_classes,
                kernel_size=1,
            ))

        self.pretrained = pretrained
        self.init_weight()
Example 12
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride=1,
                 branches=4,
                 kernel_size_maximum=7,
                 down_method='esp'):
        super(EESP, self).__init__()
        if out_channels % branches != 0:
            raise RuntimeError(
                "The out_channels for EESP should be divisible by branches, but out_channels={} can't be divided by branches={}"
                .format(out_channels, branches))
        assert down_method in [
            'avg', 'esp'
        ], "The down_method for EESP only supports 'avg' or 'esp', but got down_method={}".format(
            down_method)
        self.in_channels = in_channels
        self.stride = stride

        in_branch_channels = int(out_channels / branches)
        self.group_conv_in = layers.ConvBNPReLU(in_channels,
                                                in_branch_channels,
                                                1,
                                                stride=1,
                                                groups=branches,
                                                bias_attr=False)

        map_ksize_dilation = {
            3: 1,
            5: 2,
            7: 3,
            9: 4,
            11: 5,
            13: 6,
            15: 7,
            17: 8
        }
        self.kernel_sizes = []
        for i in range(branches):
            kernel_size = 3 + 2 * i
            kernel_size = kernel_size if kernel_size <= kernel_size_maximum else 3
            self.kernel_sizes.append(kernel_size)
        self.kernel_sizes.sort()

        self.spp_modules = nn.LayerList()
        for i in range(branches):
            dilation = map_ksize_dilation[self.kernel_sizes[i]]
            self.spp_modules.append(
                nn.Conv2D(in_branch_channels,
                          in_branch_channels,
                          kernel_size=3,
                          padding='same',
                          stride=stride,
                          dilation=dilation,
                          groups=in_branch_channels,
                          bias_attr=False))
        self.group_conv_out = layers.ConvBN(out_channels,
                                            out_channels,
                                            kernel_size=1,
                                            stride=1,
                                            groups=branches,
                                            bias_attr=False)
        self.bn_act = BNPReLU(out_channels)
        self._act = nn.PReLU()
        self.down_method = down_method == 'avg'
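The map_ksize_dilation table is not arbitrary: a 3x3 convolution with dilation d covers an effective span of 2*d + 1 pixels, so each nominal kernel size k maps to dilation (k - 1) // 2. A self-contained check:

map_ksize_dilation = {3: 1, 5: 2, 7: 3, 9: 4, 11: 5, 13: 6, 15: 7, 17: 8}
# Every entry satisfies k == 2 * d + 1, i.e. the dilated 3x3 kernel's
# effective receptive field equals the nominal kernel size.
assert all(k == 2 * d + 1 for k, d in map_ksize_dilation.items())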