Example #1
    def __init__(self, in_dim, out_dim, expand):
        super().__init__()

        expand_dim = expand * in_dim

        self.conv = nn.Sequential(
            layers.ConvBNReLU(in_dim, in_dim, 3),
            layers.DepthwiseConvBN(in_dim, expand_dim, 3),
            layers.ConvBN(expand_dim, out_dim, 1))
Example #2
 def __init__(self,
              in_channels,
              out_channels,
              kernel_size,
              padding='same',
              **kwargs):
     super().__init__()
     self.depthwise_conv = layers.ConvBNReLU(in_channels,
                                             out_channels=in_channels,
                                             kernel_size=kernel_size,
                                             padding=padding,
                                             groups=in_channels,
                                             **kwargs)
     self.pointwise_conv = layers.ConvBNReLU(in_channels,
                                             out_channels,
                                             kernel_size=1,
                                             groups=1,
                                             bias_attr=False)
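
Example #2's split works because groups=in_channels makes the first conv depthwise. A minimal self-contained sketch of the same pattern in plain Paddle (class and layer names here are illustrative, not from the source):

import paddle
import paddle.nn as nn

class SeparableConvSketch(nn.Layer):
    def __init__(self, in_channels, out_channels, kernel_size=3):
        super().__init__()
        # groups=in_channels: one filter per channel, no cross-channel mixing
        self.depthwise = nn.Conv2D(in_channels, in_channels, kernel_size,
                                   padding='same', groups=in_channels)
        # 1x1 pointwise conv mixes the channels
        self.pointwise = nn.Conv2D(in_channels, out_channels, 1)

    def forward(self, x):
        return self.pointwise(self.depthwise(x))

x = paddle.randn([1, 16, 32, 32])
print(SeparableConvSketch(16, 32)(x).shape)  # [1, 32, 32, 32]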
Example #3
    def __init__(self, in_dim, out_dim):
        super(ContextEmbeddingBlock, self).__init__()

        self.gap = nn.AdaptiveAvgPool2D(1)
        self.bn = layers.SyncBatchNorm(in_dim)

        self.conv_1x1 = layers.ConvBNReLU(in_dim, out_dim, 1)
        self.add = layers.Add()
        self.conv_3x3 = nn.Conv2D(out_dim, out_dim, 3, 1, 1)
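
A functional sketch of how these context-embedding pieces plausibly compose (the forward is not shown, so this is an assumption based on BiSeNetV2-style context embedding; BN/ReLU wrappers omitted from the stand-ins):

import paddle
import paddle.nn as nn
import paddle.nn.functional as F

x = paddle.randn([1, 64, 32, 32])
gap = F.adaptive_avg_pool2d(x, 1)            # [1, 64, 1, 1] global descriptor
ctx = nn.Conv2D(64, 64, 1)(gap)              # stand-in for conv_1x1
y = x + ctx                                  # broadcast add, as layers.Add() would do
out = nn.Conv2D(64, 64, 3, padding=1)(y)     # stand-in for conv_3x3
print(out.shape)  # [1, 64, 32, 32]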
Example #4
    def __init__(self,
                 num_classes,
                 backbone_indices,
                 backbone_channels,
                 gc_channels,
                 ratio,
                 enable_auxiliary_loss=True):

        super().__init__()

        in_channels = backbone_channels[1]
        self.conv_bn_relu1 = layers.ConvBNReLU(in_channels=in_channels,
                                               out_channels=gc_channels,
                                               kernel_size=3,
                                               padding=1)

        self.gc_block = GlobalContextBlock(in_channels=gc_channels,
                                           ratio=ratio)

        self.conv_bn_relu2 = layers.ConvBNReLU(in_channels=gc_channels,
                                               out_channels=gc_channels,
                                               kernel_size=3,
                                               padding=1)

        self.conv_bn_relu3 = layers.ConvBNReLU(in_channels=in_channels +
                                               gc_channels,
                                               out_channels=gc_channels,
                                               kernel_size=3,
                                               padding=1)

        self.dropout = nn.Dropout(p=0.1)

        self.conv = nn.Conv2D(in_channels=gc_channels,
                              out_channels=num_classes,
                              kernel_size=1)

        if enable_auxiliary_loss:
            self.auxlayer = layers.AuxLayer(
                in_channels=backbone_channels[0],
                inter_channels=backbone_channels[0] // 4,
                out_channels=num_classes)

        self.backbone_indices = backbone_indices
        self.enable_auxiliary_loss = enable_auxiliary_loss
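
The layer widths imply the head runs conv_bn_relu1 → gc_block → conv_bn_relu2, concatenates the result with its own input, then applies conv_bn_relu3, dropout, and the 1x1 classifier. A hedged shape walk-through with a shape-preserving no-op standing in for GlobalContextBlock (channel counts are illustrative):

import paddle
import paddle.nn as nn

feat = paddle.randn([1, 2048, 16, 16])             # backbone_channels[1] = 2048 (assumed)
gc_channels, num_classes = 512, 19                 # illustrative values
conv1 = nn.Conv2D(2048, gc_channels, 3, padding=1) # plain-conv stand-ins for ConvBNReLU
conv2 = nn.Conv2D(gc_channels, gc_channels, 3, padding=1)
conv3 = nn.Conv2D(2048 + gc_channels, gc_channels, 3, padding=1)
cls = nn.Conv2D(gc_channels, num_classes, 1)

y = conv2(conv1(feat))                             # gc_block assumed shape-preserving
y = paddle.concat([feat, y], axis=1)               # why conv3 takes in_channels + gc_channels
print(cls(conv3(y)).shape)                         # [1, 19, 16, 16]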
Example #5
    def __init__(self, num_classes, backbone_indices, backbone_channels,
                 aspp_ratios, aspp_out_channels, align_corners):
        super().__init__()
        self.backbone_indices = backbone_indices
        self.align_corners = align_corners
        self.aspp = layers.ASPPModule(
            aspp_ratios=aspp_ratios,
            in_channels=backbone_channels[backbone_indices[1]],
            out_channels=aspp_out_channels,
            align_corners=align_corners,
            image_pooling=True)

        self.bot_fine = nn.Conv2D(backbone_channels[backbone_indices[0]],
                                  48,
                                  1,
                                  bias_attr=False)
        # decoupled body/edge branches
        self.squeeze_body_edge = SqueezeBodyEdge(
            256, align_corners=self.align_corners)
        self.edge_fusion = nn.Conv2D(256 + 48, 256, 1, bias_attr=False)
        self.sigmoid_edge = nn.Sigmoid()
        self.edge_out = nn.Sequential(
            layers.ConvBNReLU(in_channels=256,
                              out_channels=48,
                              kernel_size=3,
                              bias_attr=False),
            nn.Conv2D(48, 1, 1, bias_attr=False))
        self.dsn_seg_body = nn.Sequential(
            layers.ConvBNReLU(in_channels=256,
                              out_channels=256,
                              kernel_size=3,
                              bias_attr=False),
            nn.Conv2D(256, num_classes, 1, bias_attr=False))

        self.final_seg = nn.Sequential(
            layers.ConvBNReLU(in_channels=512,
                              out_channels=256,
                              kernel_size=3,
                              bias_attr=False),
            layers.ConvBNReLU(in_channels=256,
                              out_channels=256,
                              kernel_size=3,
                              bias_attr=False),
            nn.Conv2D(256, num_classes, kernel_size=1, bias_attr=False))
Example #6
    def __init__(self,
                 num_classes,
                 in_channels,
                 reduction,
                 use_scale,
                 mode,
                 temperature,
                 concat_input=True,
                 enable_auxiliary_loss=True,
                 **kwargs):
        super(DNLHead, self).__init__()
        self.in_channels = in_channels[-1]
        self.concat_input = concat_input
        self.enable_auxiliary_loss = enable_auxiliary_loss
        inter_channels = self.in_channels // 4

        self.dnl_block = DisentangledNonLocal2D(in_channels=inter_channels,
                                                reduction=reduction,
                                                use_scale=use_scale,
                                                temperature=temperature,
                                                mode=mode)
        self.conv0 = layers.ConvBNReLU(in_channels=self.in_channels,
                                       out_channels=inter_channels,
                                       kernel_size=3,
                                       bias_attr=False)
        self.conv1 = layers.ConvBNReLU(in_channels=inter_channels,
                                       out_channels=inter_channels,
                                       kernel_size=3,
                                       bias_attr=False)
        self.cls = nn.Sequential(nn.Dropout2D(p=0.1),
                                 nn.Conv2D(inter_channels, num_classes, 1))
        self.aux = nn.Sequential(
            layers.ConvBNReLU(in_channels=1024,
                              out_channels=256,
                              kernel_size=3,
                              bias_attr=False), nn.Dropout2D(p=0.1),
            nn.Conv2D(256, num_classes, 1))
        if self.concat_input:
            self.conv_cat = layers.ConvBNReLU(self.in_channels +
                                              inter_channels,
                                              inter_channels,
                                              kernel_size=3,
                                              bias_attr=False)
Example #7
    def __init__(self,
                 aspp_ratios,
                 in_channels,
                 out_channels,
                 align_corners,
                 use_sep_conv=False,
                 image_pooling=False):
        super().__init__()

        self.align_corners = align_corners
        self.aspp_blocks = nn.LayerList()

        for ratio in aspp_ratios:
            if use_sep_conv and ratio > 1:
                conv_func = layers.SeparableConvBNReLU
            else:
                conv_func = layers.ConvBNReLU

            block = conv_func(in_channels=in_channels,
                              out_channels=out_channels,
                              kernel_size=1 if ratio == 1 else 3,
                              dilation=ratio,
                              padding=0 if ratio == 1 else ratio)
            self.aspp_blocks.append(block)

        out_size = len(self.aspp_blocks)

        if image_pooling:
            self.global_avg_pool = nn.Sequential(
                nn.AdaptiveAvgPool2D(output_size=(1, 1)),
                layers.ConvBNReLU(in_channels,
                                  out_channels,
                                  kernel_size=1,
                                  bias_attr=False))
            out_size += 1
        self.image_pooling = image_pooling

        self.conv_bn_relu = layers.ConvBNReLU(in_channels=out_channels *
                                              out_size,
                                              out_channels=out_channels,
                                              kernel_size=1)

        self.dropout = nn.Dropout(p=0.1)  # dropout on the fused ASPP features
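
With kernel 3 and stride 1, out = H + 2p − 2d, so setting padding equal to the dilation rate keeps every ASPP branch the same spatial size; the 1x1 branch needs no padding. A quick check (the rates are illustrative, not from the source):

import paddle
import paddle.nn as nn

x = paddle.randn([1, 256, 32, 32])
for ratio in (1, 6, 12, 18):                       # typical ASPP rates (assumed)
    conv = nn.Conv2D(256, 256,
                     kernel_size=1 if ratio == 1 else 3,
                     dilation=ratio,
                     padding=0 if ratio == 1 else ratio)
    print(ratio, conv(x).shape)                    # all [1, 256, 32, 32]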
Example #8
 def __init__(self, inplane, align_corners=False):
     super().__init__()
     self.align_corners = align_corners
     self.down = nn.Sequential(
         layers.ConvBNReLU(inplane,
                           inplane,
                           kernel_size=3,
                           groups=inplane,
                           stride=2),
         layers.ConvBNReLU(inplane,
                           inplane,
                           kernel_size=3,
                           groups=inplane,
                           stride=2))
     self.flow_make = nn.Conv2D(inplane * 2,
                                2,
                                kernel_size=3,
                                padding='same',
                                bias_attr=False)
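
flow_make predicts a 2-channel offset field; flow-alignment modules typically apply it with grid_sample over a base pixel grid. A hedged sketch of that warp (the conventions here are assumptions: flow in pixels, grid normalized to [-1, 1]):

import paddle
import paddle.nn.functional as F

def flow_warp_sketch(x, flow):
    # x: [N, C, H, W]; flow: [N, 2, H, W] pixel offsets (assumed layout)
    _, _, h, w = x.shape
    ys, xs = paddle.meshgrid(paddle.arange(h, dtype='float32'),
                             paddle.arange(w, dtype='float32'))
    grid = paddle.stack([xs, ys], axis=2).unsqueeze(0)    # [1, H, W, 2] base grid
    grid = grid + flow.transpose([0, 2, 3, 1])            # add predicted offsets
    norm = paddle.to_tensor([w - 1, h - 1], dtype='float32')
    grid = 2.0 * grid / norm - 1.0                        # normalize to [-1, 1]
    return F.grid_sample(x, grid, align_corners=True)

x = paddle.randn([1, 8, 16, 16])
print(flow_warp_sketch(x, paddle.zeros([1, 2, 16, 16])).shape)  # [1, 8, 16, 16]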
Example #9
    def __init__(self,
                 num_channels,
                 num_filters,
                 has_se,
                 stride=1,
                 downsample=False,
                 name=None):
        super(BottleneckBlock, self).__init__()

        self.has_se = has_se
        self.downsample = downsample

        self.conv1 = layers.ConvBNReLU(in_channels=num_channels,
                                       out_channels=num_filters,
                                       kernel_size=1,
                                       padding='same',
                                       bias_attr=False)

        self.conv2 = layers.ConvBNReLU(in_channels=num_filters,
                                       out_channels=num_filters,
                                       kernel_size=3,
                                       stride=stride,
                                       padding='same',
                                       bias_attr=False)

        self.conv3 = layers.ConvBN(in_channels=num_filters,
                                   out_channels=num_filters * 4,
                                   kernel_size=1,
                                   padding='same',
                                   bias_attr=False)

        if self.downsample:
            self.conv_down = layers.ConvBN(in_channels=num_channels,
                                           out_channels=num_filters * 4,
                                           kernel_size=1,
                                           padding='same',
                                           bias_attr=False)

        if self.has_se:
            self.se = SELayer(num_channels=num_filters * 4,
                              num_filters=num_filters * 4,
                              reduction_ratio=16,
                              name=name + '_fc')
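
The implied residual wiring (hedged, since the forward is not shown): conv1 → conv2 → conv3 expands to 4x the filters, the shortcut is conv_down-projected when downsample is set, and the two paths are added before the final ReLU. In plain Paddle, with BN/ReLU wrappers omitted:

import paddle
import paddle.nn as nn
import paddle.nn.functional as F

x = paddle.randn([1, 64, 32, 32])
conv1 = nn.Conv2D(64, 64, 1)             # ConvBNReLU stand-ins
conv2 = nn.Conv2D(64, 64, 3, padding=1)
conv3 = nn.Conv2D(64, 256, 1)            # 4x expansion
conv_down = nn.Conv2D(64, 256, 1)        # shortcut projection (downsample branch)

out = F.relu(conv3(conv2(conv1(x))) + conv_down(x))
print(out.shape)  # [1, 256, 32, 32]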
Example #10
    def __init__(self,
                 num_classes,
                 in_channels,
                 ema_channels,
                 gc_channels,
                 num_bases,
                 stage_num,
                 momentum,
                 concat_input=True,
                 enable_auxiliary_loss=True):
        super(EMAHead, self).__init__()

        self.in_channels = in_channels[-1]
        self.concat_input = concat_input
        self.enable_auxiliary_loss = enable_auxiliary_loss

        self.emau = EMAU(ema_channels, num_bases, stage_num, momentum=momentum)
        self.ema_in_conv = layers.ConvBNReLU(in_channels=self.in_channels,
                                             out_channels=ema_channels,
                                             kernel_size=3)
        self.ema_mid_conv = nn.Conv2D(ema_channels,
                                      ema_channels,
                                      kernel_size=1)
        for param in self.ema_mid_conv.parameters():
            param.stop_gradient = True
        self.ema_out_conv = layers.ConvBNReLU(in_channels=ema_channels,
                                              out_channels=ema_channels,
                                              kernel_size=1)
        self.bottleneck = layers.ConvBNReLU(in_channels=ema_channels,
                                            out_channels=gc_channels,
                                            kernel_size=3)
        self.cls = nn.Sequential(nn.Dropout2D(p=0.1),
                                 nn.Conv2D(gc_channels, num_classes, 1))
        self.aux = nn.Sequential(
            layers.ConvBNReLU(in_channels=1024,
                              out_channels=256,
                              kernel_size=3), nn.Dropout2D(p=0.1),
            nn.Conv2D(256, num_classes, 1))
        if self.concat_input:
            self.conv_cat = layers.ConvBNReLU(self.in_channels + gc_channels,
                                              gc_channels,
                                              kernel_size=3)
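
The stop_gradient loop freezes ema_mid_conv: its parameters are excluded from backprop (EMANet updates its bases by moving average rather than by gradients). The mechanism in isolation:

import paddle
import paddle.nn as nn

conv = nn.Conv2D(8, 8, 1)
for param in conv.parameters():
    param.stop_gradient = True           # freeze, as done for ema_mid_conv

x = paddle.randn([1, 8, 4, 4])
x.stop_gradient = False                  # keep one trainable leaf in the graph
conv(x).mean().backward()
print(conv.weight.grad)                  # None: the frozen conv received no gradient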
Example #11
 def __init__(self, in_channels, out_channels, scale, is_aux=False):
     super().__init__()
     inner_channel = 128 if is_aux else 64
     self.conv_3x3 = layers.ConvBNReLU(in_channels,
                                       inner_channel,
                                       3,
                                       stride=1,
                                       padding=1,
                                       bias_attr=False)
     self.conv_1x1 = nn.Conv2D(inner_channel, out_channels, 1)
     self.scale = scale
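
self.scale is presumably consumed in the forward to upsample the logits back toward input resolution. A hedged sketch of that last step (channel counts are illustrative):

import paddle
import paddle.nn as nn
import paddle.nn.functional as F

x = paddle.randn([1, 64, 32, 32])        # inner_channel = 64 feature (assumed)
conv_1x1 = nn.Conv2D(64, 19, 1)          # num_classes = 19 (illustrative)
logit = F.interpolate(conv_1x1(x), scale_factor=8, mode='bilinear')  # scale = 8
print(logit.shape)  # [1, 19, 256, 256]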
Example #12
 def __init__(self, in_chan, mid_chan, n_classes):
     super(SegHead, self).__init__()
     self.conv = layers.ConvBNReLU(in_chan,
                                   mid_chan,
                                   kernel_size=3,
                                   stride=1,
                                   padding=1)
     self.conv_out = nn.Conv2D(mid_chan,
                               n_classes,
                               kernel_size=1,
                               bias_attr=None)
Example #13
    def __init__(self,
                 in_channels,
                 key_channels,
                 out_channels,
                 dropout_rate=0.1):
        super().__init__()

        self.attention_block = ObjectAttentionBlock(in_channels, key_channels)
        self.conv1x1 = nn.Sequential(
            layers.ConvBNReLU(2 * in_channels, out_channels, 1),
            nn.Dropout2D(dropout_rate))
Example #14
    def __init__(self,
                 num_classes,
                 backbone,
                 conv_channel=128,
                 pretrained=None):
        super().__init__()
        self.backbone = backbone
        self.spatial_path = SpatialPath(3, 128)
        self.global_context = nn.Sequential(
            nn.AdaptiveAvgPool2D(1),
            layers.ConvBNReLU(512, conv_channel, 1, bias_attr=False),
        )

        self.arms = nn.LayerList([
            AttentionRefinement(512, conv_channel),
            AttentionRefinement(256, conv_channel),
        ])
        self.refines = nn.LayerList([
            layers.ConvBNReLU(conv_channel,
                              conv_channel,
                              3,
                              stride=1,
                              padding=1,
                              bias_attr=False),
            layers.ConvBNReLU(conv_channel,
                              conv_channel,
                              3,
                              stride=1,
                              padding=1,
                              bias_attr=False),
        ])

        self.heads = nn.LayerList([
            BiSeNetHead(conv_channel, num_classes, 8, True),
            BiSeNetHead(conv_channel, num_classes, 8, True),
            BiSeNetHead(conv_channel * 2, num_classes, 8, False),
        ])

        self.ffm = FeatureFusion(conv_channel * 2, conv_channel * 2, 1)

        self.pretrained = pretrained
Example #15
    def __init__(self, filter_size, fusion, in_channels, channels):
        super().__init__()
        self.filter_size = filter_size
        self.fusion = fusion
        self.channels = channels

        pad = (self.filter_size - 1) // 2
        if (self.filter_size - 1) % 2 == 0:
            self.pad = (pad, pad, pad, pad)
        else:
            self.pad = (pad + 1, pad, pad + 1, pad)

        self.avg_pool = nn.AdaptiveAvgPool2D(filter_size)
        self.filter_gen_conv = nn.Conv2D(in_channels, channels, 1)
        self.input_redu_conv = layers.ConvBNReLU(in_channels, channels, 1)

        self.norm = layers.SyncBatchNorm(channels)
        self.act = nn.ReLU()

        if self.fusion:
            self.fusion_conv = layers.ConvBNReLU(channels, channels, 1)
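
The pad tuple covers even filter sizes, where keeping spatial size requires asymmetric padding: the total pad per axis must equal filter_size − 1, which an even kernel cannot split evenly. Checking with filter_size = 4:

import paddle
import paddle.nn.functional as F

filter_size = 4
pad = (filter_size - 1) // 2                 # 1
pads = [pad + 1, pad, pad + 1, pad]          # [left, right, top, bottom] = [2, 1, 2, 1]
x = paddle.randn([1, 3, 16, 16])
print(F.pad(x, pads).shape)                  # [1, 3, 19, 19]: each axis grows by filter_size - 1

For an odd filter_size (say 3), (filter_size − 1) % 2 == 0 and the symmetric tuple (1, 1, 1, 1) suffices.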
Example #16
    def __init__(self,
                 in_channels,
                 out_channels,
                 multi_scale_output=True,
                 name=None,
                 align_corners=False):
        super(FuseLayers, self).__init__()

        self._actual_ch = len(in_channels) if multi_scale_output else 1
        self._in_channels = in_channels
        self.align_corners = align_corners

        self.residual_func_list = []
        for i in range(self._actual_ch):
            for j in range(len(in_channels)):
                if j > i:
                    residual_func = self.add_sublayer(
                        "residual_{}_layer_{}_{}".format(name, i + 1, j + 1),
                        layers.ConvBN(
                            in_channels=in_channels[j],
                            out_channels=out_channels[i],
                            kernel_size=1,
                            padding='same',
                            bias_attr=False))
                    self.residual_func_list.append(residual_func)
                elif j < i:
                    pre_num_filters = in_channels[j]
                    for k in range(i - j):
                        if k == i - j - 1:
                            residual_func = self.add_sublayer(
                                "residual_{}_layer_{}_{}_{}".format(
                                    name, i + 1, j + 1, k + 1),
                                layers.ConvBN(
                                    in_channels=pre_num_filters,
                                    out_channels=out_channels[i],
                                    kernel_size=3,
                                    stride=2,
                                    padding='same',
                                    bias_attr=False))
                            pre_num_filters = out_channels[i]
                        else:
                            residual_func = self.add_sublayer(
                                "residual_{}_layer_{}_{}_{}".format(
                                    name, i + 1, j + 1, k + 1),
                                layers.ConvBNReLU(
                                    in_channels=pre_num_filters,
                                    out_channels=out_channels[j],
                                    kernel_size=3,
                                    stride=2,
                                    padding='same',
                                    bias_attr=False))
                            pre_num_filters = out_channels[j]
                        self.residual_func_list.append(residual_func)
Example #17
    def __init__(self,
                 num_channels,
                 num_filters,
                 stride=1,
                 has_se=False,
                 downsample=False,
                 name=None,
                 padding_same=True):
        super(BasicBlock, self).__init__()

        self.has_se = has_se
        self.downsample = downsample

        self.conv1 = layers.ConvBNReLU(
            in_channels=num_channels,
            out_channels=num_filters,
            kernel_size=3,
            stride=stride,
            padding=1 if not padding_same else 'same',
            bias_attr=False)
        self.conv2 = layers.ConvBN(in_channels=num_filters,
                                   out_channels=num_filters,
                                   kernel_size=3,
                                   padding=1 if not padding_same else 'same',
                                   bias_attr=False)

        if self.downsample:
            self.conv_down = layers.ConvBNReLU(in_channels=num_channels,
                                               out_channels=num_filters,
                                               kernel_size=1,
                                               bias_attr=False)

        if self.has_se:
            self.se = SELayer(num_channels=num_filters,
                              num_filters=num_filters,
                              reduction_ratio=16,
                              name=name + '_fc')

        self.add = layers.Add()
        self.relu = layers.Activation("relu")
Example #18
 def __init__(self, in_chan, out_chan):
     super(AttentionRefinementModule, self).__init__()
     self.conv = layers.ConvBNReLU(in_chan,
                                   out_chan,
                                   kernel_size=3,
                                   stride=1,
                                   padding=1)
     self.conv_atten = nn.Conv2D(out_chan,
                                 out_chan,
                                 kernel_size=1,
                                 bias_attr=None)
     self.bn_atten = nn.BatchNorm2D(out_chan)
     self.sigmoid_atten = nn.Sigmoid()
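
These pieces implement channel attention: pool to a per-channel descriptor, 1x1 conv, BN, sigmoid, then rescale the feature map channel-wise. A hedged functional sketch of that composition:

import paddle
import paddle.nn as nn
import paddle.nn.functional as F

x = paddle.randn([4, 128, 32, 32])
conv_atten = nn.Conv2D(128, 128, 1)
bn_atten = nn.BatchNorm2D(128)

atten = F.adaptive_avg_pool2d(x, 1)          # [4, 128, 1, 1] channel descriptor
atten = F.sigmoid(bn_atten(conv_atten(atten)))
print((x * atten).shape)                     # [4, 128, 32, 32], channel-wise gated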
Example #19
 def __init__(self, dim_in, proj_dim=256, proj='convmlp'):
     super(ProjectionHead, self).__init__()
     if proj == 'linear':
         self.proj = nn.Conv2D(dim_in, proj_dim, kernel_size=1)
     elif proj == 'convmlp':
         self.proj = nn.Sequential(
             layers.ConvBNReLU(dim_in, dim_in, kernel_size=1),
             nn.Conv2D(dim_in, proj_dim, kernel_size=1),
         )
     else:
         raise ValueError(
             "The type of project head only support 'linear' and 'convmlp', but got {}."
             .format(proj))
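
Contrastive projection heads are usually followed by L2 normalization of the per-pixel embedding; that step is an assumption here, not shown in the source. A self-contained sketch of the convmlp path (BN/ReLU omitted from the stand-in; dim_in=2048 and proj_dim=256 are illustrative):

import paddle
import paddle.nn as nn
import paddle.nn.functional as F

proj = nn.Sequential(nn.Conv2D(2048, 2048, 1),   # ConvBNReLU stand-in
                     nn.Conv2D(2048, 256, 1))
feat = paddle.randn([2, 2048, 16, 16])
emb = F.normalize(proj(feat), p=2, axis=1)       # unit-length embedding per pixel (assumed step)
print(emb.shape)  # [2, 256, 16, 16]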
Example #20
 def __init__(self):
     super().__init__()
     self.conv1 = layers.ConvBNReLU(4,
                                    64,
                                    kernel_size=3,
                                    padding=1,
                                    bias_attr=False)
     self.conv2 = layers.ConvBNReLU(64,
                                    64,
                                    kernel_size=3,
                                    padding=1,
                                    bias_attr=False)
     self.conv3 = layers.ConvBNReLU(64,
                                    64,
                                    kernel_size=3,
                                    padding=1,
                                    bias_attr=False)
     self.alpha_pred = layers.ConvBNReLU(64,
                                         1,
                                         kernel_size=3,
                                         padding=1,
                                         bias_attr=False)
Example #21
    def __init__(self,
                 num_classes,
                 in_channels,
                 ocr_mid_channels=512,
                 ocr_key_channels=256):
        super().__init__()

        self.num_classes = num_classes
        self.spatial_gather = SpatialGatherBlock(ocr_mid_channels, num_classes)
        self.spatial_ocr = SpatialOCRModule(ocr_mid_channels, ocr_key_channels,
                                            ocr_mid_channels)

        self.indices = [-2, -1] if len(in_channels) > 1 else [-1, -1]

        self.conv3x3_ocr = layers.ConvBNReLU(
            in_channels[self.indices[1]], ocr_mid_channels, 3, padding=1)
        self.cls_head = nn.Conv2D(ocr_mid_channels, self.num_classes, 1)
        self.aux_head = nn.Sequential(
            layers.ConvBNReLU(in_channels[self.indices[0]],
                              in_channels[self.indices[0]], 1),
            nn.Conv2D(in_channels[self.indices[0]], self.num_classes, 1))

        self.init_weight()
Example #22
    def __init__(self,
                 in_channels,
                 out_channels,
                 align_corners,
                 use_deconv=False):
        super().__init__()

        self.align_corners = align_corners

        self.use_deconv = use_deconv
        if self.use_deconv:
            self.deconv = nn.Conv2DTranspose(in_channels,
                                             out_channels // 2,
                                             kernel_size=2,
                                             stride=2,
                                             padding=0)
            in_channels = in_channels + out_channels // 2
        else:
            in_channels *= 2

        self.double_conv = nn.Sequential(
            layers.ConvBNReLU(in_channels, out_channels, 3),
            layers.ConvBNReLU(out_channels, out_channels, 3))
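
The two channel adjustments mirror the two upsampling modes: a deconv halves the channels before the skip concat, while bilinear upsampling keeps them, so the concatenated input doubles (in_channels *= 2). The bilinear path, sketched with illustrative sizes:

import paddle
import paddle.nn as nn
import paddle.nn.functional as F

x = paddle.randn([1, 128, 16, 16])           # decoder feature
skip = paddle.randn([1, 128, 32, 32])        # encoder skip (same width assumed)

x = F.interpolate(x, size=skip.shape[2:], mode='bilinear', align_corners=False)
y = paddle.concat([x, skip], axis=1)         # 256 channels: in_channels *= 2
double_conv = nn.Sequential(nn.Conv2D(256, 64, 3, padding=1),   # ConvBNReLU stand-ins
                            nn.Conv2D(64, 64, 3, padding=1))
print(double_conv(y).shape)  # [1, 64, 32, 32]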
Example #23
    def __init__(self, num_classes, in_channels):
        super().__init__()
        in_channels = in_channels[-1]
        inter_channels = in_channels // 4

        self.channel_conv = layers.ConvBNReLU(in_channels, inter_channels, 3)
        self.position_conv = layers.ConvBNReLU(in_channels, inter_channels, 3)
        self.pam = PAM(inter_channels)
        self.cam = CAM()
        self.conv1 = layers.ConvBNReLU(inter_channels, inter_channels, 3)
        self.conv2 = layers.ConvBNReLU(inter_channels, inter_channels, 3)

        self.aux_head = nn.Sequential(nn.Dropout2D(0.1),
                                      nn.Conv2D(in_channels, num_classes, 1))

        self.aux_head_pam = nn.Sequential(
            nn.Dropout2D(0.1), nn.Conv2D(inter_channels, num_classes, 1))

        self.aux_head_cam = nn.Sequential(
            nn.Dropout2D(0.1), nn.Conv2D(inter_channels, num_classes, 1))

        self.cls_head = nn.Sequential(
            nn.Dropout2D(0.1), nn.Conv2D(inter_channels, num_classes, 1))
Example #24
    def __init__(self, in_dim, out_dim, expand):
        super().__init__()

        expand_dim = expand * in_dim

        self.branch_1 = nn.Sequential(
            layers.ConvBNReLU(in_dim, in_dim, 3),
            layers.DepthwiseConvBN(in_dim, expand_dim, 3, stride=2),
            layers.DepthwiseConvBN(expand_dim, expand_dim, 3),
            layers.ConvBN(expand_dim, out_dim, 1))

        self.branch_2 = nn.Sequential(
            layers.DepthwiseConvBN(in_dim, in_dim, 3, stride=2),
            layers.ConvBN(in_dim, out_dim, 1))
Example #25
 def __init__(self, backbone, use_conv_last=False):
     super(ContextPath, self).__init__()
     self.backbone = backbone
     self.arm16 = AttentionRefinementModule(512, 128)
     inplanes = 1024
     if use_conv_last:
         inplanes = 1024  # NOTE: identical to the default, so this branch has no effect as written
     self.arm32 = AttentionRefinementModule(inplanes, 128)
     self.conv_head32 = layers.ConvBNReLU(128,
                                          128,
                                          kernel_size=3,
                                          stride=1,
                                          padding=1)
     self.conv_head16 = layers.ConvBNReLU(128,
                                          128,
                                          kernel_size=3,
                                          stride=1,
                                          padding=1)
     self.conv_avg = layers.ConvBNReLU(inplanes,
                                       128,
                                       kernel_size=1,
                                       stride=1,
                                       padding=0)
Example #26
    def __init__(self, high_in_channels, low_in_channels, out_channels):
        super().__init__()

        # Depthwise conv; groups is hardcoded to 128, so this is truly depthwise
        # only when low_in_channels == 128
        self.dwconv = layers.ConvBNReLU(
            in_channels=low_in_channels,
            out_channels=out_channels,
            kernel_size=3,
            padding=1,
            groups=128,
            bias_attr=False)

        self.conv_low_res = layers.ConvBN(out_channels, out_channels, 1)
        self.conv_high_res = layers.ConvBN(high_in_channels, out_channels, 1)
Example #27
    def __init__(self, num_classes, in_channels, align_corners):
        super(Decoder, self).__init__()

        self.conv_bn_relu1 = layers.ConvBNReLU(
            in_channels=in_channels, out_channels=48, kernel_size=1)

        self.conv_bn_relu2 = layers.SeparableConvBNReLU(
            in_channels=304, out_channels=256, kernel_size=3, padding=1)
        self.conv_bn_relu3 = layers.SeparableConvBNReLU(
            in_channels=256, out_channels=256, kernel_size=3, padding=1)
        self.conv = nn.Conv2D(
            in_channels=256, out_channels=num_classes, kernel_size=1)

        self.align_corners = align_corners
Example #28
    def __init__(self,
                 low_in_channels,
                 high_in_channels,
                 key_channels,
                 value_channels,
                 out_channels=None,
                 scale=1,
                 psp_size=(1, 3, 6, 8)):
        super().__init__()

        self.scale = scale
        self.in_channels = low_in_channels
        self.out_channels = out_channels
        self.key_channels = key_channels
        self.value_channels = value_channels
        if out_channels is None:
            self.out_channels = high_in_channels
        self.pool = nn.MaxPool2D(scale)
        self.f_key = layers.ConvBNReLU(
            in_channels=low_in_channels,
            out_channels=key_channels,
            kernel_size=1)
        self.f_query = layers.ConvBNReLU(
            in_channels=high_in_channels,
            out_channels=key_channels,
            kernel_size=1)
        self.f_value = nn.Conv2D(
            in_channels=low_in_channels,
            out_channels=value_channels,
            kernel_size=1)

        self.W = nn.Conv2D(
            in_channels=value_channels,
            out_channels=self.out_channels,  # use the resolved value, not the possibly-None argument
            kernel_size=1)

        self.psp_size = psp_size
Example #29
 def __init__(self, mlahead_channels=128):
     super(MLAHeads, self).__init__()
     self.head2 = nn.Sequential(
         layers.ConvBNReLU(mlahead_channels * 2,
                           mlahead_channels,
                           3,
                           padding=1,
                           bias_attr=False),
         layers.ConvBNReLU(mlahead_channels,
                           mlahead_channels,
                           3,
                           padding=1,
                           bias_attr=False))
     self.head3 = nn.Sequential(
         layers.ConvBNReLU(mlahead_channels * 2,
                           mlahead_channels,
                           3,
                           padding=1,
                           bias_attr=False),
         layers.ConvBNReLU(mlahead_channels,
                           mlahead_channels,
                           3,
                           padding=1,
                           bias_attr=False))
     self.head4 = nn.Sequential(
         layers.ConvBNReLU(mlahead_channels * 2,
                           mlahead_channels,
                           3,
                           padding=1,
                           bias_attr=False),
         layers.ConvBNReLU(mlahead_channels,
                           mlahead_channels,
                           3,
                           padding=1,
                           bias_attr=False))
     self.head5 = nn.Sequential(
         layers.ConvBNReLU(mlahead_channels * 2,
                           mlahead_channels,
                           3,
                           padding=1,
                           bias_attr=False),
         layers.ConvBNReLU(mlahead_channels,
                           mlahead_channels,
                           3,
                           padding=1,
                           bias_attr=False))
Example #30
    def build_project(self, in_channels, channels, num_convs, use_conv_module):
        if use_conv_module:
            convs = [
                layers.ConvBNReLU(in_channels=in_channels,
                                  out_channels=channels,
                                  kernel_size=1,
                                  bias_attr=False)
            ]
            for _ in range(num_convs - 1):
                convs.append(
                    layers.ConvBNReLU(in_channels=channels,
                                      out_channels=channels,
                                      kernel_size=1,
                                      bias_attr=False))
        else:
            convs = [nn.Conv2D(in_channels, channels, 1)]
            for _ in range(num_convs - 1):
                convs.append(nn.Conv2D(channels, channels, 1))

        if len(convs) > 1:
            convs = nn.Sequential(*convs)
        else:
            convs = convs[0]
        return convs
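
Usage sketch: build_project returns the bare layer when num_convs == 1 and an nn.Sequential otherwise. A standalone equivalent of the plain-conv branch (build_project_sketch is a hypothetical helper mirroring the logic above):

import paddle
import paddle.nn as nn

def build_project_sketch(in_channels, channels, num_convs):
    convs = [nn.Conv2D(in_channels, channels, 1)]
    for _ in range(num_convs - 1):
        convs.append(nn.Conv2D(channels, channels, 1))
    return nn.Sequential(*convs) if len(convs) > 1 else convs[0]

x = paddle.randn([1, 512, 16, 16])
print(type(build_project_sketch(512, 256, 1)).__name__)   # Conv2D
print(build_project_sketch(512, 256, 2)(x).shape)         # [1, 256, 16, 16]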