Example #1
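An ENet-style DownsamplingBottleneck: the main branch max-pools (optionally returning indices for later unpooling) while the extension branch runs a strided 2x2 conv, a 3x3 conv, and a 1x1 expansion, each followed by layers.SyncBatchNorm, with Dropout2D as regularization.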
    def __init__(self,
                 in_channels,
                 out_channels,
                 internal_ratio=4,
                 return_indices=False,
                 dropout_prob=0,
                 bias=False,
                 relu=True):
        super(DownsamplingBottleneck, self).__init__()

        self.return_indices = return_indices

        if internal_ratio <= 1 or internal_ratio > in_channels:
            raise RuntimeError(
                "Value out of range. Expected value in the "
                "interval (1, {0}], got internal_ratio={1}.".format(
                    in_channels, internal_ratio))

        internal_channels = in_channels // internal_ratio

        if relu:
            activation = nn.ReLU
        else:
            activation = nn.PReLU

        self.main_max1 = nn.MaxPool2D(2, stride=2, return_mask=return_indices)

        self.ext_conv1 = nn.Sequential(
            nn.Conv2D(in_channels,
                      internal_channels,
                      kernel_size=2,
                      stride=2,
                      bias_attr=bias), layers.SyncBatchNorm(internal_channels),
            activation())

        self.ext_conv2 = nn.Sequential(
            nn.Conv2D(internal_channels,
                      internal_channels,
                      kernel_size=3,
                      stride=1,
                      padding=1,
                      bias_attr=bias), layers.SyncBatchNorm(internal_channels),
            activation())

        self.ext_conv3 = nn.Sequential(
            nn.Conv2D(internal_channels,
                      out_channels,
                      kernel_size=1,
                      stride=1,
                      bias_attr=bias), layers.SyncBatchNorm(out_channels),
            activation())

        self.ext_regul = nn.Dropout2D(p=dropout_prob)

        self.out_activation = activation()
Example #2
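A thin conv wrapper that picks nn.Conv1D or nn.Conv2D based on conv_cfg and optionally appends a SyncBatchNorm built with the caller's data_format.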
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 padding=0,
                 stride=1,
                 conv_cfg='Conv1D',
                 norm_cfg='None',
                 **kwargs):
        super().__init__()
        if conv_cfg == 'Conv1D':
            self._conv = nn.Conv1D(in_channels,
                                   out_channels,
                                   kernel_size,
                                   stride=stride,
                                   padding=padding,
                                   **kwargs)
        elif conv_cfg == 'Conv2D':
            self._conv = nn.Conv2D(in_channels,
                                   out_channels,
                                   kernel_size,
                                   stride=stride,
                                   padding=padding,
                                   **kwargs)
        else:
            # Fail loudly instead of leaving self._conv undefined.
            raise ValueError(
                "conv_cfg should be 'Conv1D' or 'Conv2D', but got {}.".format(
                    conv_cfg))
        data_format = kwargs.get('data_format', 'NCHW')
        if norm_cfg != 'None':
            self._batch_norm = layers.SyncBatchNorm(out_channels,
                                                    data_format=data_format)
        else:
            self._batch_norm = None
Example #3
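A ConvBNLayer pairing a bias-free nn.Conv2D with a SyncBatchNorm whose weight and bias use zero L2 decay, plus a configurable activation.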
    def __init__(self,
                 in_c,
                 out_c,
                 filter_size,
                 stride,
                 padding,
                 dilation=1,
                 num_groups=1,
                 if_act=True,
                 act=None):
        super(ConvBNLayer, self).__init__()
        self.if_act = if_act
        self.act = act

        self.conv = nn.Conv2D(in_channels=in_c,
                              out_channels=out_c,
                              kernel_size=filter_size,
                              stride=stride,
                              padding=padding,
                              dilation=dilation,
                              groups=num_groups,
                              bias_attr=False)
        self.bn = layers.SyncBatchNorm(
            num_features=out_c,
            weight_attr=paddle.ParamAttr(
                regularizer=paddle.regularizer.L2Decay(0.0)),
            bias_attr=paddle.ParamAttr(
                regularizer=paddle.regularizer.L2Decay(0.0)))
        self._act_op = layers.Activation(act=act)  # identity when act is None
Example #4
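A ConvBNLayer in the ResNet-vd style: an optional 2x2 average pool (is_vd_mode), a bias-free conv with same-padding when dilation == 1, then SyncBatchNorm and an activation.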
    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size,
        stride=1,
        dilation=1,
        groups=1,
        is_vd_mode=False,
        act=None,
    ):
        super(ConvBNLayer, self).__init__()

        self.is_vd_mode = is_vd_mode
        self._pool2d_avg = nn.AvgPool2D(kernel_size=2,
                                        stride=2,
                                        padding=0,
                                        ceil_mode=True)
        self._conv = nn.Conv2D(in_channels=in_channels,
                               out_channels=out_channels,
                               kernel_size=kernel_size,
                               stride=stride,
                               padding=(kernel_size - 1) // 2 if dilation == 1 else 0,
                               dilation=dilation,
                               groups=groups,
                               bias_attr=False)

        self._batch_norm = layers.SyncBatchNorm(out_channels)
        self._act_op = layers.Activation(act=act)
Example #5
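A context embedding block (BiSeNet V2-style): global average pooling followed by SyncBatchNorm, a 1x1 ConvBNReLU projection, and a 3x3 conv.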
    def __init__(self, in_dim, out_dim):
        super(ContextEmbeddingBlock, self).__init__()

        self.gap = nn.AdaptiveAvgPool2D(1)
        self.bn = layers.SyncBatchNorm(in_dim)

        self.conv_1x1 = layers.ConvBNReLU(in_dim, out_dim, 1)
        self.conv_3x3 = nn.Conv2D(out_dim, out_dim, 3, 1, 1)
Example #6
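A minimal norm-activation pair: SyncBatchNorm (honoring an optional data_format keyword) followed by PReLU.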
    def __init__(self, out_channels, **kwargs):
        super().__init__()
        data_format = kwargs.get('data_format', 'NCHW')
        self._batch_norm = layers.SyncBatchNorm(out_channels,
                                                data_format=data_format)
        self._prelu = layers.Activation("prelu")
Example #7
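A graph-reasoning module: 1x1 convs project features into node and state spaces, a GraphLayer propagates information, and a 1x1 conv plus SyncBatchNorm restores the input channel count.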
    def __init__(self, in_channels, num_state=256, num_node=84, nclass=59):
        super().__init__()
        self.num_state = num_state
        self.conv_theta = nn.Conv2D(
            in_channels, num_node, kernel_size=1, stride=1, padding=0)
        self.conv_phi = nn.Conv2D(
            in_channels, num_state, kernel_size=1, stride=1, padding=0)
        self.graph = GraphLayer(num_state, num_node, nclass)
        self.extend_dim = nn.Conv2D(
            num_state, in_channels, kernel_size=1, bias_attr=False)

        self.bn = layers.SyncBatchNorm(in_channels)
Example #8
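A gated convolution: the gate branch normalizes the input concatenated with a one-channel cue, squeezes it down to a single sigmoid mask, and the main conv produces the output.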
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size=1,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 bias_attr=False):
        super().__init__()
        self._gate_conv = nn.Sequential(
            layers.SyncBatchNorm(in_channels + 1),
            nn.Conv2D(in_channels + 1, in_channels + 1, kernel_size=1),
            nn.ReLU(), nn.Conv2D(in_channels + 1, 1, kernel_size=1),
            layers.SyncBatchNorm(1), nn.Sigmoid())
        self.conv = nn.Conv2D(in_channels,
                              out_channels,
                              kernel_size=kernel_size,
                              stride=stride,
                              padding=padding,
                              dilation=dilation,
                              groups=groups,
                              bias_attr=bias_attr)
Example #9
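The ESPNet decoder: transposed convs progressively upsample the encoder's class maps, with BNPReLU blocks and a dilated residual block fusing the level-2 and level-3 paths.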
    def __init__(self,
                 num_classes,
                 in_channels=3,
                 level2_depth=2,
                 level3_depth=3,
                 pretrained=None):
        super().__init__()
        self.encoder = ESPNetEncoder(num_classes, in_channels, level2_depth,
                                     level3_depth)

        self.level3_up = nn.Conv2DTranspose(num_classes,
                                            num_classes,
                                            2,
                                            stride=2,
                                            padding=0,
                                            output_padding=0,
                                            bias_attr=False)
        self.br3 = layers.SyncBatchNorm(num_classes)
        self.level2_proj = nn.Conv2D(in_channels + 128,
                                     num_classes,
                                     1,
                                     bias_attr=False)
        self.combine_l2_l3 = nn.Sequential(
            BNPReLU(2 * num_classes),
            DilatedResidualBlock(2 * num_classes, num_classes, residual=False),
        )
        self.level2_up = nn.Sequential(
            nn.Conv2DTranspose(num_classes,
                               num_classes,
                               2,
                               stride=2,
                               padding=0,
                               output_padding=0,
                               bias_attr=False),
            BNPReLU(num_classes),
        )
        self.out_proj = layers.ConvBNPReLU(16 + in_channels + num_classes,
                                           num_classes,
                                           3,
                                           padding='same',
                                           stride=1)
        self.out_up = nn.Conv2DTranspose(num_classes,
                                         num_classes,
                                         2,
                                         stride=2,
                                         padding=0,
                                         output_padding=0,
                                         bias_attr=False)
        self.pretrained = pretrained
Example #10
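A depthwise-separable convolution (Seperate_Conv): a depthwise conv with groups=input_channels followed by a 1x1 pointwise conv, each with SyncBatchNorm (epsilon=1e-3, momentum=0.99) and an optional activation.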
    def __init__(self,
                 input_channels,
                 output_channels,
                 stride,
                 filter,
                 dilation=1,
                 act=None,
                 name=None):
        super(Seperate_Conv, self).__init__()

        self._conv1 = nn.Conv2D(in_channels=input_channels,
                                out_channels=input_channels,
                                kernel_size=filter,
                                stride=stride,
                                groups=input_channels,
                                padding=(filter // 2) * dilation,
                                dilation=dilation,
                                bias_attr=False)
        self._bn1 = layers.SyncBatchNorm(input_channels,
                                         epsilon=1e-3,
                                         momentum=0.99)

        self._act_op1 = layers.Activation(act=act)

        self._conv2 = nn.Conv2D(input_channels,
                                output_channels,
                                1,
                                stride=1,
                                groups=1,
                                padding=0,
                                bias_attr=False)
        self._bn2 = layers.SyncBatchNorm(output_channels,
                                         epsilon=1e-3,
                                         momentum=0.99)

        self._act_op2 = layers.Activation(act=act)
Example #11
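The SFNetHead: a pyramid pooling module on the deepest feature, 1x1 lateral convs with SyncBatchNorm per FPN input, AlignedModule-based upsampling, optional auxiliary heads, and a final classifier over the concatenated pyramid.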
    def __init__(self,
                 inplane,
                 num_class,
                 fpn_inplanes,
                 fpn_dim=256,
                 enable_auxiliary_loss=False):
        super(SFNetHead, self).__init__()
        self.ppm = layers.PPModule(in_channels=inplane,
                                   out_channels=fpn_dim,
                                   bin_sizes=(1, 2, 3, 6),
                                   dim_reduction=True,
                                   align_corners=True)
        self.enable_auxiliary_loss = enable_auxiliary_loss
        self.fpn_in = []

        for fpn_inplane in fpn_inplanes[:-1]:
            self.fpn_in.append(
                nn.Sequential(nn.Conv2D(fpn_inplane, fpn_dim, 1),
                              layers.SyncBatchNorm(fpn_dim), nn.ReLU()))

        self.fpn_in = nn.LayerList(self.fpn_in)
        self.fpn_out = []
        self.fpn_out_align = []
        self.dsn = []
        for i in range(len(fpn_inplanes) - 1):
            self.fpn_out.append(
                nn.Sequential(
                    layers.ConvBNReLU(fpn_dim, fpn_dim, 3, bias_attr=False)))
            self.fpn_out_align.append(
                AlignedModule(inplane=fpn_dim, outplane=fpn_dim // 2))
            if self.enable_auxiliary_loss:
                self.dsn.append(
                    nn.Sequential(layers.AuxLayer(fpn_dim, fpn_dim,
                                                  num_class)))

        self.fpn_out = nn.LayerList(self.fpn_out)
        self.fpn_out_align = nn.LayerList(self.fpn_out_align)

        if self.enable_auxiliary_loss:
            self.dsn = nn.LayerList(self.dsn)

        self.conv_last = nn.Sequential(
            layers.ConvBNReLU(len(fpn_inplanes) * fpn_dim,
                              fpn_dim,
                              3,
                              bias_attr=False),
            nn.Conv2D(fpn_dim, num_class, kernel_size=1))
Example #12
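An ENet-style InitialBlock: a strided 3x3 conv producing out_channels - 3 maps is concatenated with a max-pooled copy of the 3-channel input, then normalized and activated.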
    def __init__(self, in_channels, out_channels, bias=False, relu=True):
        super(InitialBlock, self).__init__()

        if relu:
            activation = nn.ReLU
        else:
            activation = nn.PReLU

        self.main_branch = nn.Conv2D(in_channels,
                                     out_channels - 3,
                                     kernel_size=3,
                                     stride=2,
                                     padding=1,
                                     bias_attr=bias)

        self.ext_branch = nn.MaxPool2D(3, stride=2, padding=1)

        self.batch_norm = layers.SyncBatchNorm(out_channels)

        self.out_activation = activation()
Example #13
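A downsampling block that splits out_channels across a strided 3x3 conv and four parallel dilated 3x3 convs (dilation 2, 4, 8, 16), normalizing the concatenated result with SyncBatchNorm and PReLU.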
    def __init__(self, in_channels, out_channels):
        super().__init__()
        branch_channels = out_channels // 5
        remain_channels = out_channels - branch_channels * 4
        self.conv1 = nn.Conv2D(in_channels,
                               branch_channels,
                               3,
                               stride=2,
                               padding=1,
                               bias_attr=False)
        self.d_conv1 = nn.Conv2D(branch_channels,
                                 remain_channels,
                                 3,
                                 padding=1,
                                 bias_attr=False)
        self.d_conv2 = nn.Conv2D(branch_channels,
                                 branch_channels,
                                 3,
                                 padding=2,
                                 dilation=2,
                                 bias_attr=False)
        self.d_conv4 = nn.Conv2D(branch_channels,
                                 branch_channels,
                                 3,
                                 padding=4,
                                 dilation=4,
                                 bias_attr=False)
        self.d_conv8 = nn.Conv2D(branch_channels,
                                 branch_channels,
                                 3,
                                 padding=8,
                                 dilation=8,
                                 bias_attr=False)
        self.d_conv16 = nn.Conv2D(branch_channels,
                                  branch_channels,
                                  3,
                                  padding=16,
                                  dilation=16,
                                  bias_attr=False)
        self.bn = layers.SyncBatchNorm(out_channels)
        self.act = nn.PReLU(out_channels)
Example #14
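A dynamic-filter module (DMNet-style): adaptive average pooling plus a 1x1 conv generate spatial filters, the input is channel-reduced with ConvBNReLU, and asymmetric padding handles even filter sizes.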
    def __init__(self, filter_size, fusion, in_channels, channels):
        super().__init__()
        self.filter_size = filter_size
        self.fusion = fusion
        self.channels = channels

        pad = (self.filter_size - 1) // 2
        if (self.filter_size - 1) % 2 == 0:
            self.pad = (pad, pad, pad, pad)
        else:
            self.pad = (pad + 1, pad, pad + 1, pad)

        self.avg_pool = nn.AdaptiveAvgPool2D(filter_size)
        self.filter_gen_conv = nn.Conv2D(in_channels, channels, 1)
        self.input_redu_conv = layers.ConvBNReLU(in_channels, channels, 1)

        self.norm = layers.SyncBatchNorm(channels)
        self.act = nn.ReLU()

        if self.fusion:
            self.fusion_conv = layers.ConvBNReLU(channels, channels, 1)
Example #15
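A plain ConvBNLayer: bias-free conv, SyncBatchNorm with epsilon=1e-3 and momentum=0.99, and a configurable activation.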
    def __init__(self,
                 input_channels,
                 output_channels,
                 filter_size,
                 stride=1,
                 padding=0,
                 act=None,
                 name=None):
        super(ConvBNLayer, self).__init__()

        self._conv = nn.Conv2D(in_channels=input_channels,
                               out_channels=output_channels,
                               kernel_size=filter_size,
                               stride=stride,
                               padding=padding,
                               bias_attr=False)
        self._bn = layers.SyncBatchNorm(num_features=output_channels,
                                        epsilon=1e-3,
                                        momentum=0.99)

        self._act_op = layers.Activation(act=act)
Example #16
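A conv-BN block whose conv weights are initialized with KaimingUniform via paddle.ParamAttr.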
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 padding,
                 groups=1,
                 **kwargs):
        super().__init__()
        weight_attr = paddle.ParamAttr(
            learning_rate=1, initializer=nn.initializer.KaimingUniform())
        self._conv = nn.Conv2D(in_channels,
                               out_channels,
                               kernel_size,
                               padding=padding,
                               stride=stride,
                               groups=groups,
                               weight_attr=weight_attr,
                               bias_attr=False,
                               **kwargs)

        self._batch_norm = layers.SyncBatchNorm(out_channels)
Example #17
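An FPN neck: 1x1 lateral convs with SyncBatchNorm and ReLU reduce each backbone stage to fpn_outplanes, and 3x3 ConvBNReLU blocks smooth the outputs.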
    def __init__(
        self,
        fpn_inplanes=[256, 512, 1024, 2048],
        fpn_outplanes=256,
    ):
        super(FPNNeck, self).__init__()
        self.lateral_convs = []
        self.fpn_out = []

        # FPN head
        for fpn_inplane in fpn_inplanes:
            self.lateral_convs.append(
                nn.Sequential(nn.Conv2D(fpn_inplane, fpn_outplanes, 1),
                              layers.SyncBatchNorm(fpn_outplanes), nn.ReLU()))
            self.fpn_out.append(
                nn.Sequential(
                    layers.ConvBNReLU(fpn_outplanes,
                                      fpn_outplanes,
                                      3,
                                      bias_attr=False)))

        self.lateral_convs = nn.LayerList(self.lateral_convs)
        self.fpn_out = nn.LayerList(self.fpn_out)
Example #18
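A data_format-aware variant of the ConvBNLayer from Example #4: it validates the dilation/kernel_size combination, pads by dilation for dilated convs, and threads data_format through the pool, conv, and SyncBatchNorm.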
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 dilation=1,
                 groups=1,
                 is_vd_mode=False,
                 act=None,
                 data_format='NCHW'):
        super(ConvBNLayer, self).__init__()
        if dilation != 1 and kernel_size != 3:
            raise RuntimeError("When the dilation isn't 1, "
                               "the kernel_size should be 3.")

        self.is_vd_mode = is_vd_mode
        self._pool2d_avg = nn.AvgPool2D(kernel_size=2,
                                        stride=2,
                                        padding=0,
                                        ceil_mode=True,
                                        data_format=data_format)
        self._conv = nn.Conv2D(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=(kernel_size - 1) // 2 if dilation == 1 else dilation,
            dilation=dilation,
            groups=groups,
            bias_attr=False,
            data_format=data_format)

        self._batch_norm = layers.SyncBatchNorm(out_channels,
                                                data_format=data_format)
        self._act_op = layers.Activation(act=act)
Example #19
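An ENet-style RegularBottleneck: a 1x1 projection, a middle stage that is either a standard (possibly dilated) conv or an asymmetric kx1 + 1xk pair, and a 1x1 expansion, each followed by SyncBatchNorm.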
    def __init__(self,
                 channels,
                 internal_ratio=4,
                 kernel_size=3,
                 padding=0,
                 dilation=1,
                 asymmetric=False,
                 dropout_prob=0,
                 bias=False,
                 relu=True):
        super(RegularBottleneck, self).__init__()

        if internal_ratio <= 1 or internal_ratio > channels:
            raise RuntimeError(
                "Value out of range. Expected value in the "
                "interval (1, {0}], got internal_ratio={1}.".format(
                    channels, internal_ratio))

        internal_channels = channels // internal_ratio

        if relu:
            activation = nn.ReLU
        else:
            activation = nn.PReLU

        self.ext_conv1 = nn.Sequential(
            nn.Conv2D(channels,
                      internal_channels,
                      kernel_size=1,
                      stride=1,
                      bias_attr=bias), layers.SyncBatchNorm(internal_channels),
            activation())

        if asymmetric:
            self.ext_conv2 = nn.Sequential(
                nn.Conv2D(internal_channels,
                          internal_channels,
                          kernel_size=(kernel_size, 1),
                          stride=1,
                          padding=(padding, 0),
                          dilation=dilation,
                          bias_attr=bias),
                layers.SyncBatchNorm(internal_channels), activation(),
                nn.Conv2D(internal_channels,
                          internal_channels,
                          kernel_size=(1, kernel_size),
                          stride=1,
                          padding=(0, padding),
                          dilation=dilation,
                          bias_attr=bias),
                layers.SyncBatchNorm(internal_channels), activation())
        else:
            self.ext_conv2 = nn.Sequential(
                nn.Conv2D(internal_channels,
                          internal_channels,
                          kernel_size=kernel_size,
                          stride=1,
                          padding=padding,
                          dilation=dilation,
                          bias_attr=bias),
                layers.SyncBatchNorm(internal_channels), activation())

        self.ext_conv3 = nn.Sequential(
            nn.Conv2D(internal_channels,
                      channels,
                      kernel_size=1,
                      stride=1,
                      bias_attr=bias), layers.SyncBatchNorm(channels),
            activation())

        self.ext_regul = nn.Dropout2D(p=dropout_prob)

        self.out_activation = activation()
Example #20
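The smallest pattern in this set: a bare SyncBatchNorm + PReLU pair.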
    def __init__(self, channels):
        super().__init__()
        self.bn = layers.SyncBatchNorm(channels)
        self.act = nn.PReLU(channels)
Example #21
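The PFPNHead: lateral convs and FPN-out blocks per input, scale heads that convolve and upsample each level to a common resolution, optional dropout and an auxiliary branch, and final 1x1 classifiers.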
    def __init__(self,
                 num_class,
                 fpn_inplanes,
                 channels,
                 dropout_ratio=0.1,
                 fpn_dim=256,
                 enable_auxiliary_loss=False,
                 align_corners=False):
        super(PFPNHead, self).__init__()
        self.enable_auxiliary_loss = enable_auxiliary_loss
        self.align_corners = align_corners
        self.lateral_convs = nn.LayerList()
        self.fpn_out = nn.LayerList()

        for fpn_inplane in fpn_inplanes:
            self.lateral_convs.append(
                nn.Sequential(nn.Conv2D(fpn_inplane, fpn_dim, 1),
                              layers.SyncBatchNorm(fpn_dim), nn.ReLU()))
            self.fpn_out.append(
                nn.Sequential(
                    layers.ConvBNReLU(fpn_dim, fpn_dim, 3, bias_attr=False)))

        self.scale_heads = nn.LayerList()
        for index in range(len(fpn_inplanes)):
            head_length = max(
                1,
                int(np.log2(fpn_inplanes[index]) - np.log2(fpn_inplanes[0])))
            scale_head = nn.LayerList()
            for head_index in range(head_length):
                scale_head.append(
                    layers.ConvBNReLU(
                        fpn_dim,
                        channels,
                        3,
                        padding=1,
                    ))
                if fpn_inplanes[index] != fpn_inplanes[0]:
                    scale_head.append(
                        nn.Upsample(scale_factor=2,
                                    mode='bilinear',
                                    align_corners=align_corners))
            self.scale_heads.append(nn.Sequential(*scale_head))

        if dropout_ratio:
            self.dropout = nn.Dropout2D(dropout_ratio)
            if self.enable_auxiliary_loss:
                self.dsn = nn.Sequential(
                    layers.ConvBNReLU(fpn_inplanes[2],
                                      fpn_inplanes[2],
                                      3,
                                      padding=1), nn.Dropout2D(dropout_ratio),
                    nn.Conv2D(fpn_inplanes[2], num_class, kernel_size=1))
        else:
            self.dropout = None
            if self.enable_auxiliary_loss:
                self.dsn = nn.Sequential(
                    layers.ConvBNReLU(fpn_inplanes[2],
                                      fpn_inplanes[2],
                                      3,
                                      padding=1),
                    nn.Conv2D(fpn_inplanes[2], num_class, kernel_size=1))

        self.conv_last = nn.Sequential(
            layers.ConvBNReLU(len(fpn_inplanes) * fpn_dim,
                              fpn_dim,
                              3,
                              bias_attr=False),
            nn.Conv2D(fpn_dim, num_class, kernel_size=1))
        self.conv_seg = nn.Conv2D(channels, num_class, kernel_size=1)