Example #1
 def __init__(self, num_classes=10, max_points=1024):
     super(PointNet_Basic_Clas, self).__init__()
     self.mlp_1 = nn.Sequential(
         nn.Conv1D(3, 64, 1),
         nn.BatchNorm(64),
         nn.ReLU(),
         nn.Conv1D(64, 64, 1),
         nn.BatchNorm(64),
         nn.ReLU(),
     )
     self.mlp_2 = nn.Sequential(
         nn.Conv1D(64, 64, 1),
         nn.BatchNorm(64),
         nn.ReLU(),
         nn.Conv1D(64, 128, 1),
         nn.BatchNorm(128),
         nn.ReLU(),
         nn.Conv1D(128, max_points, 1),
         nn.BatchNorm(max_points),
         nn.ReLU(),
     )
     self.fc = nn.Sequential(nn.Linear(1024, 512), nn.ReLU(),
                             nn.Linear(512, 256), nn.ReLU(),
                             nn.Dropout(p=0.7),
                             nn.Linear(256, num_classes))
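A forward pass is not shown here; a minimal sketch, assuming the usual PointNet pipeline (shared pointwise MLPs, a max-pool over the point axis, then the fully connected head) and an input of shape [B, 3, N]:

 # Hypothetical forward pass, not part of the original example.
 def forward(self, x):
     x = self.mlp_1(x)          # pointwise features: [B, 64, N]
     x = self.mlp_2(x)          # [B, max_points(=1024), N]
     x = paddle.max(x, axis=2)  # global max-pool over points: [B, 1024]
     return self.fc(x)          # class logits: [B, num_classes]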
Example #2
    def __init__(self, num_channels, num_filters):
        super(Decoder, self).__init__()

        self.conv1 = nn.Conv2D(num_channels,
                               num_filters,
                               kernel_size=3,
                               stride=1,
                               padding=1,
                               bias_attr=False)
        self.bn1 = nn.BatchNorm(num_filters, act='relu')

        self.conv2 = nn.Conv2D(num_filters,
                               num_filters,
                               kernel_size=3,
                               stride=1,
                               padding=1,
                               bias_attr=False)
        self.bn2 = nn.BatchNorm(num_filters, act='relu')

        self.conv0 = nn.Conv2D(num_channels,
                               num_filters,
                               kernel_size=1,
                               stride=1,
                               padding=0,
                               bias_attr=False)
        self.bn0 = nn.BatchNorm(num_filters, act='relu')
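The 1x1 conv0/bn0 branch looks like a residual projection shortcut; a hedged sketch of the likely forward wiring (an assumption, since the original forward is not shown; ReLU is already fused into each BatchNorm via act='relu'):

        # Hypothetical forward, assuming the residual wiring implied by the 1x1 shortcut.
        def forward(self, inputs):
            x = self.bn1(self.conv1(inputs))
            x = self.bn2(self.conv2(x))
            short = self.bn0(self.conv0(inputs))  # project input to num_filters
            return x + short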
Example #3
 def __init__(self, in_planes, planes, stride=1, downsample=None, reduction=16):
     super(SEBasicBlock, self).__init__()
     self.conv1 = nn.Conv2D(in_planes, planes, kernel_size=3, stride=stride, padding=1)
     self.bn1 = nn.BatchNorm(planes)
     self.conv2 = nn.Conv2D(planes, planes, kernel_size=3, stride=1, padding=1)
     self.bn2 = nn.BatchNorm(planes)
     self.se = SELayer(planes, reduction)
     self.downsample = downsample
     self.stride = stride
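These are the standard SE-ResNet basic-block pieces; a hedged sketch of how they are typically composed (the original forward is not shown):

 # Hypothetical forward, following the usual SE-ResNet pattern.
 def forward(self, x):
     identity = x if self.downsample is None else self.downsample(x)
     out = paddle.nn.functional.relu(self.bn1(self.conv1(x)))
     out = self.se(self.bn2(self.conv2(out)))  # channel-wise reweighting
     return paddle.nn.functional.relu(out + identity)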
Example #4
 def __init__(self, in_channels, n_filters):
     super(DecoderBlock, self).__init__()
     self.conv1 = nn.Conv2D(in_channels, in_channels//4, 1)
     self.norm1 = nn.BatchNorm(in_channels//4)
     self.scse = SCSEBlock(in_channels//4)
     self.deconv2 = nn.Conv2DTranspose(in_channels//4, in_channels//4, 3, stride=2, padding=1, output_padding=1)
     self.norm2 = nn.BatchNorm(in_channels//4)
     self.conv3 = nn.Conv2D(in_channels//4, n_filters, 1)
     self.norm3 = nn.BatchNorm(n_filters)
Example #5
 def __init__(self, inplanes, planes, stride=1, downsample=None):
     super(BasicBlock, self).__init__()
     self.conv1 = conv3x3(inplanes, planes, stride)
     self.bn1 = nn.BatchNorm(planes)
     self.relu = nn.ReLU()
     self.conv2 = conv3x3(planes, planes)
     self.bn2 = nn.BatchNorm(planes)
     self.downsample = downsample
     self.stride = stride
Example #6
 def __init__(self, feature_channels=256, num_classes=50, max_points=1024):
     super(VFE_Seg, self).__init__()
     self.max_points = max_points
     self.vfe = VFE(feature_channels, max_points)
     self.seg_net = nn.Sequential(
         nn.Conv1D(max_points + feature_channels * 2, 512, 1),
         nn.BatchNorm(512), nn.ReLU(), nn.Conv1D(512, 256, 1),
         nn.BatchNorm(256), nn.ReLU(), nn.Conv1D(256, 128, 1),
         nn.BatchNorm(128), nn.ReLU(), nn.Conv1D(128, 128, 1),
         nn.BatchNorm(128), nn.ReLU(), nn.Conv1D(128, num_classes, 1))
Example #7
 def __init__(self, num_classes=50, max_points=1024):
     super(PointNet_Basic_Seg, self).__init__()
     self.max_points = max_points
      self.pointnet_basic = PointNet_Basic(max_points)
     self.seg_net = nn.Sequential(nn.Conv1D(max_points + 64, 512, 1),
                                  nn.BatchNorm(512), nn.ReLU(),
                                  nn.Conv1D(512, 256, 1), nn.BatchNorm(256),
                                  nn.ReLU(), nn.Conv1D(256, 128, 1),
                                  nn.BatchNorm(128), nn.ReLU(),
                                  nn.Conv1D(128, 128, 1), nn.BatchNorm(128),
                                  nn.ReLU(), nn.Conv1D(128, num_classes, 1))
Example #8
 def __init__(self, in_features, kernel_size, padding, **kwargs):
     super(ResBlock2d, self).__init__(**kwargs)
     self.conv1 = nn.Conv2D(in_features,
                            in_features,
                            kernel_size=kernel_size,
                            padding=padding)
     self.conv2 = nn.Conv2D(in_features,
                            in_features,
                            kernel_size=kernel_size,
                            padding=padding)
     self.norm1 = nn.BatchNorm(num_channels=in_features)
     self.norm2 = nn.BatchNorm(num_channels=in_features)
Example #9
 def __init__(self, in_channels, name_list):
     super(Head, self).__init__()
     self.conv1 = nn.Conv2D(in_channels=in_channels,
                            out_channels=in_channels // 4,
                            kernel_size=3,
                            padding=1,
                            weight_attr=ParamAttr(name=name_list[0] +
                                                  '.w_0'),
                            bias_attr=False)
     self.conv_bn1 = nn.BatchNorm(
         num_channels=in_channels // 4,
         param_attr=ParamAttr(
             name=name_list[1] + '.w_0',
             initializer=paddle.nn.initializer.Constant(value=1.0)),
         bias_attr=ParamAttr(
             name=name_list[1] + '.b_0',
             initializer=paddle.nn.initializer.Constant(value=1e-4)),
         moving_mean_name=name_list[1] + '.w_1',
         moving_variance_name=name_list[1] + '.w_2',
         act='relu')
     self.conv2 = nn.Conv2DTranspose(
         in_channels=in_channels // 4,
         out_channels=in_channels // 4,
         kernel_size=2,
         stride=2,
         weight_attr=ParamAttr(
             name=name_list[2] + '.w_0',
             initializer=paddle.nn.initializer.KaimingUniform()),
         bias_attr=get_bias_attr(in_channels // 4, name_list[-1] + "conv2"))
     self.conv_bn2 = nn.BatchNorm(
         num_channels=in_channels // 4,
         param_attr=ParamAttr(
             name=name_list[3] + '.w_0',
             initializer=paddle.nn.initializer.Constant(value=1.0)),
         bias_attr=ParamAttr(
             name=name_list[3] + '.b_0',
             initializer=paddle.nn.initializer.Constant(value=1e-4)),
         moving_mean_name=name_list[3] + '.w_1',
         moving_variance_name=name_list[3] + '.w_2',
         act="relu")
     self.conv3 = nn.Conv2DTranspose(
         in_channels=in_channels // 4,
         out_channels=1,
         kernel_size=2,
         stride=2,
         weight_attr=ParamAttr(
             name=name_list[4] + '.w_0',
             initializer=paddle.nn.initializer.KaimingUniform()),
         bias_attr=get_bias_attr(in_channels // 4, name_list[-1] + "conv3"),
     )
Example #10
 def __init__(self,
              in_channels,
              out_channels,
              is_batchnorm,
              num_conv=2,
              kernel_size=3,
              stride=1,
              padding=1):
     super(UnetConv2D, self).__init__()
     self.num_conv = num_conv
     for i in range(num_conv):
         conv = (nn.Sequential(nn.Conv2D(in_channels, out_channels, kernel_size, stride, padding),
                               nn.BatchNorm(out_channels),
                               nn.ReLU()) \
                 if is_batchnorm else \
                 nn.Sequential(nn.Conv2D(in_channels, out_channels, kernel_size, stride, padding),
                               nn.ReLU()))
         setattr(self, 'conv%d' % (i + 1), conv)
         in_channels = out_channels
     # initialise the blocks
     for children in self.children():
          children.weight_attr = paddle.framework.ParamAttr(
              initializer=paddle.nn.initializer.KaimingNormal())
          children.bias_attr = paddle.framework.ParamAttr(
              initializer=paddle.nn.initializer.KaimingNormal())
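Because the conv stages are registered dynamically via setattr, a matching forward (an assumption mirroring that naming scheme) simply applies conv1..convN in order:

 # Hypothetical forward, not in the original snippet.
 def forward(self, inputs):
     x = inputs
     for i in range(self.num_conv):
         x = getattr(self, 'conv%d' % (i + 1))(x)
     return x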
Example #11
    def __init__(
            self,
            in_channels,
            out_channels,
            kernel_size,
            stride=1,
            groups=1,
            is_vd_mode=False,
            act=None,
            name=None, ):
        super(ConvBNLayer, self).__init__()

        self.is_vd_mode = is_vd_mode
        self._pool2d_avg = nn.AvgPool2D(
            kernel_size=2, stride=2, padding=0, ceil_mode=True)
        self._conv = nn.Conv2D(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=(kernel_size - 1) // 2,
            groups=groups,
            weight_attr=ParamAttr(name=name + "_weights"),
            bias_attr=False)
        if name == "conv1":
            bn_name = "bn_" + name
        else:
            bn_name = "bn" + name[3:]
        self._batch_norm = nn.BatchNorm(
            out_channels,
            act=act,
            param_attr=ParamAttr(name=bn_name + '_scale'),
            bias_attr=ParamAttr(bn_name + '_offset'),
            moving_mean_name=bn_name + '_mean',
            moving_variance_name=bn_name + '_variance')
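is_vd_mode corresponds to the ResNet-vd downsampling variant, where an average pool runs before the convolution in the shortcut path; a hedged forward sketch (the activation is fused into nn.BatchNorm via act):

        # Hypothetical forward, assuming the ResNet-vd average-pool-first behaviour.
        def forward(self, inputs):
            if self.is_vd_mode:
                inputs = self._pool2d_avg(inputs)
            return self._batch_norm(self._conv(inputs))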
Example #12
    def __init__(self,
                 num_channels,
                 num_filters,
                 filter_size,
                 stride=1,
                 dilation=1,
                 groups=1,
                 act=None,
                 lr_mult=1.0,
                 name=None,
                 data_format="NCHW"):
        super(ConvBNLayer, self).__init__()
        conv_stdv = filter_size * filter_size * num_filters
        self._conv = nn.Conv2D(in_channels=num_channels,
                               out_channels=num_filters,
                               kernel_size=filter_size,
                               stride=stride,
                               padding=(filter_size - 1) // 2,
                               dilation=dilation,
                               groups=groups,
                               weight_attr=ParamAttr(learning_rate=lr_mult,
                                                     initializer=Normal(
                                                         0,
                                                         math.sqrt(
                                                             2. / conv_stdv))),
                               bias_attr=False,
                               data_format=data_format)

        self._batch_norm = nn.BatchNorm(num_filters,
                                        act=act,
                                        data_layout=data_format)
Example #13
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 groups=1,
                 act=None,
                 name=None):
        super(ConvBNLayer, self).__init__()
        self.conv = nn.Conv2D(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=2 if stride == (1, 1) else kernel_size,
            dilation=2 if stride == (1, 1) else 1,
            stride=stride,
            padding=(kernel_size - 1) // 2,
            groups=groups,
            weight_attr=ParamAttr(name=name + '.conv2d.output.1.w_0'),
            bias_attr=False,
        )

        if name == "conv1":
            bn_name = "bn_" + name
        else:
            bn_name = "bn" + name[3:]
        self.bn = nn.BatchNorm(
            num_channels=out_channels,
            act=act,
            param_attr=ParamAttr(name=name + '.output.1.w_0'),
            bias_attr=ParamAttr(name=name + '.output.1.b_0'),
            moving_mean_name=bn_name + "_mean",
            moving_variance_name=bn_name + "_variance")
Example #14
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size=4,
                 stride=2,
                 padding=1,
                 groups=1,
                 if_act=True,
                 act=None,
                 name=None):
        super(DeConvBNLayer, self).__init__()

        self.if_act = if_act
        self.act = act
        self.deconv = nn.Conv2DTranspose(in_channels=in_channels,
                                         out_channels=out_channels,
                                         kernel_size=kernel_size,
                                         stride=stride,
                                         padding=padding,
                                         groups=groups,
                                         weight_attr=ParamAttr(name=name +
                                                               '_weights'),
                                         bias_attr=False)
        self.bn = nn.BatchNorm(
            num_channels=out_channels,
            act=act,
            param_attr=ParamAttr(name="bn_" + name + "_scale"),
            bias_attr=ParamAttr(name="bn_" + name + "_offset"),
            moving_mean_name="bn_" + name + "_mean",
            moving_variance_name="bn_" + name + "_variance",
            use_global_stats=False)
Example #15
 def __init__(self,
              up_in_c: int,
              x_in_c: int,
              n_out: int,
              hook,
              final_div: bool = True,
              blur: bool = False,
              leaky: float = None,
              self_attention: bool = False,
              **kwargs):
     super().__init__()
     self.hook = hook
     up_out = x_out = n_out // 2
     self.shuf = CustomPixelShuffle_ICNR(up_in_c,
                                         up_out,
                                         blur=blur,
                                         leaky=leaky,
                                         **kwargs)
     self.bn = nn.BatchNorm(x_in_c)
     ni = up_out + x_in_c
     self.conv = custom_conv_layer(ni,
                                   x_out,
                                   leaky=leaky,
                                   self_attention=self_attention,
                                   **kwargs)
     self.relu = relu(leaky=leaky)
Example #16
    def __init__(
            self,
            up_in_c: int,
            x_in_c: int,
            # hook: Hook,
            final_div: bool = True,
            blur: bool = False,
            leaky: float = None,
            self_attention: bool = False,
            nf_factor: float = 1.0,
            **kwargs):
        super().__init__()

        self.shuf = CustomPixelShuffle_ICNR(up_in_c,
                                            up_in_c // 2,
                                            blur=blur,
                                            leaky=leaky,
                                            **kwargs)
        self.bn = nn.BatchNorm(x_in_c)
        ni = up_in_c // 2 + x_in_c
        nf = int((ni if final_div else ni // 2) * nf_factor)
        self.conv1 = custom_conv_layer(ni, nf, leaky=leaky, **kwargs)
        self.conv2 = custom_conv_layer(nf,
                                       nf,
                                       leaky=leaky,
                                       self_attention=self_attention,
                                       **kwargs)
        self.relu = relu(leaky=leaky)
Example #17
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 padding,
                 num_groups=1,
                 act='relu',
                 conv_lr=0.1,
                 conv_decay=0.,
                 norm_decay=0.,
                 norm_type='bn',
                 name=None):
        super(ConvBNLayer, self).__init__()
        self.act = act
        self._conv = nn.Conv2D(
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=num_groups,
            weight_attr=ParamAttr(
                learning_rate=conv_lr, initializer=KaimingNormal()),
            bias_attr=False)

        if norm_type == 'sync_bn':
            self._batch_norm = nn.SyncBatchNorm(out_channels)
        else:
            self._batch_norm = nn.BatchNorm(
                out_channels, act=None, use_global_stats=False)
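Since both norm branches are built with the activation disabled, the act stored on self has to be applied in forward; a sketch under that assumption (the relu6 branch is a guess modelled on common MobileNet-style layers):

        # Hypothetical forward: apply the configured activation after conv + norm.
        def forward(self, x):
            x = self._batch_norm(self._conv(x))
            if self.act == 'relu':
                x = paddle.nn.functional.relu(x)
            elif self.act == 'relu6':
                x = paddle.nn.functional.relu6(x)
            return x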
Example #18
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 padding,
                 groups=1,
                 if_act=True,
                 act=None,
                 name=None):
        super(ConvBNLayer, self).__init__()
        self.if_act = if_act
        self.act = act
        self.conv = nn.Conv2D(in_channels=in_channels,
                              out_channels=out_channels,
                              kernel_size=kernel_size,
                              stride=stride,
                              padding=padding,
                              groups=groups,
                              weight_attr=ParamAttr(name=name + '_weights'),
                              bias_attr=False)

        self.bn = nn.BatchNorm(num_channels=out_channels,
                               act=None,
                               param_attr=ParamAttr(name=name + "_bn_scale"),
                               bias_attr=ParamAttr(name=name + "_bn_offset"),
                               moving_mean_name=name + "_bn_mean",
                               moving_variance_name=name + "_bn_variance")
Example #19
    def __init__(self,
                 ch_in: int,
                 ch_out: int,
                 filter_size: int = 3,
                 stride: int = 1,
                 groups: int = 1,
                 padding: int = 0,
                 act: str = 'leaky',
                 is_test: bool = False):
        super(ConvBNLayer, self).__init__()

        self.conv = nn.Conv2D(
            ch_in,
            ch_out,
            filter_size,
            padding=padding,
            stride=stride,
            groups=groups,
            weight_attr=paddle.ParamAttr(initializer=Normal(0., 0.02)),
            bias_attr=False)

        self.batch_norm = nn.BatchNorm(num_channels=ch_out,
                                       is_test=is_test,
                                       param_attr=paddle.ParamAttr(
                                           initializer=Normal(0., 0.02),
                                           regularizer=L2Decay(0.)))
        self.act = act
Example #20
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 groups=1,
                 act="relu",
                 name=None):
        super(ConvBNLayer, self).__init__()
        self._conv = nn.Conv2D(in_channels=in_channels,
                               out_channels=out_channels,
                               kernel_size=kernel_size,
                               stride=stride,
                               padding=(kernel_size - 1) // 2,
                               groups=groups,
                               weight_attr=ParamAttr(
                                   initializer=KaimingNormal(),
                                   name=name + "_weights"),
                               bias_attr=False)
        bn_name = name + "_bn"

        self._batch_norm = nn.BatchNorm(
            num_channels=out_channels,
            act=act,
            param_attr=ParamAttr(name=bn_name + "_scale",
                                 regularizer=paddle.regularizer.L2Decay(0.0)),
            bias_attr=ParamAttr(name=bn_name + "_offset",
                                regularizer=paddle.regularizer.L2Decay(0.0)),
            moving_mean_name=bn_name + "_mean",
            moving_variance_name=bn_name + "_variance")
Example #21
    def __init__(self,
                 num_channels,
                 num_filters,
                 filter_size,
                 stride=1,
                 groups=1,
                 act="relu",
                 name=None):
        super(ConvBNLayer, self).__init__()

        self._conv = nn.Conv2D(in_channels=num_channels,
                               out_channels=num_filters,
                               kernel_size=filter_size,
                               stride=stride,
                               padding=(filter_size - 1) // 2,
                               groups=groups,
                               weight_attr=ParamAttr(name=name + "_weights"),
                               bias_attr=False)
        bn_name = name + '_bn'
        self._batch_norm = nn.BatchNorm(
            num_filters,
            act=act,
            param_attr=ParamAttr(name=bn_name + '_scale'),
            bias_attr=ParamAttr(bn_name + '_offset'),
            moving_mean_name=bn_name + '_mean',
            moving_variance_name=bn_name + '_variance')
Example #22
 def __init__(self, name_scope='VoxNet_', num_classes=10):
     super(VoxNet, self).__init__()
     self.backbone = nn.Sequential(nn.Conv3D(1, 32, 5, 2), nn.BatchNorm(32),
                                   nn.LeakyReLU(), nn.Conv3D(32, 32, 3, 1),
                                   nn.MaxPool3D(2, 2, 0))
     self.head = nn.Sequential(nn.Linear(32 * 6 * 6 * 6, 128),
                               nn.LeakyReLU(), nn.Dropout(0.2),
                               nn.Linear(128, num_classes))
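Working backwards from the Linear(32 * 6 * 6 * 6, 128) head, the backbone expects 32^3 voxel grids (conv 5/s2 -> 14^3, conv 3/s1 -> 12^3, max-pool 2/s2 -> 6^3); a hedged forward sketch:

 # Hypothetical forward, assuming voxel input of shape [B, 1, 32, 32, 32].
 def forward(self, x):
     x = self.backbone(x)                 # -> [B, 32, 6, 6, 6]
     x = paddle.flatten(x, start_axis=1)  # -> [B, 32 * 6 * 6 * 6]
     return self.head(x)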
Example #23
 def __init__(self, in_chan, out_chan, stride=1):
     super(BasicBlock, self).__init__()
     self.conv1 = conv3x3(in_chan, out_chan, stride)
     self.bn1 = nn.BatchNorm(out_chan)
     self.conv2 = conv3x3(out_chan, out_chan)
     self.bn2 = nn.BatchNorm(out_chan)
     self.relu = nn.ReLU()
     self.downsample = None
     if in_chan != out_chan or stride != 1:
         self.downsample = nn.Sequential(
              nn.Conv2D(in_chan,
                       out_chan,
                       kernel_size=1,
                       stride=stride,
                       bias_attr=False),
             nn.BatchNorm(out_chan),
         )
Example #24
    def __init__(self,
                 in_c,
                 out_c,
                 filter_size,
                 stride,
                 padding,
                 num_groups=1,
                 act=None,
                 lr_mult=1.,
                 conv_decay=0.,
                 norm_type='bn',
                 norm_decay=0.,
                 freeze_norm=False,
                 name=""):
        super(ConvBNLayer, self).__init__()
        self.act = act
        self.conv = nn.Conv2D(
            in_channels=in_c,
            out_channels=out_c,
            kernel_size=filter_size,
            stride=stride,
            padding=padding,
            groups=num_groups,
            weight_attr=ParamAttr(
                learning_rate=lr_mult,
                regularizer=L2Decay(conv_decay),
                name=name + "_weights"),
            bias_attr=False)

        norm_lr = 0. if freeze_norm else lr_mult
        param_attr = ParamAttr(
            learning_rate=norm_lr,
            regularizer=L2Decay(norm_decay),
            name=name + "_bn_scale",
            trainable=False if freeze_norm else True)
        bias_attr = ParamAttr(
            learning_rate=norm_lr,
            regularizer=L2Decay(norm_decay),
            name=name + "_bn_offset",
            trainable=False if freeze_norm else True)
        global_stats = True if freeze_norm else False
        if norm_type == 'sync_bn':
            self.bn = nn.SyncBatchNorm(
                out_c, weight_attr=param_attr, bias_attr=bias_attr)
        else:
            self.bn = nn.BatchNorm(
                out_c,
                act=None,
                param_attr=param_attr,
                bias_attr=bias_attr,
                use_global_stats=global_stats,
                moving_mean_name=name + '_bn_mean',
                moving_variance_name=name + '_bn_variance')
        norm_params = self.bn.parameters()
        if freeze_norm:
            for param in norm_params:
                param.stop_gradient = True
Example #25
 def __init__(self, in_chan, out_chan, *args, **kwargs):
     super(AttentionRefinementModule, self).__init__()
     self.conv = ConvBNReLU(in_chan, out_chan, ks=3, stride=1, padding=1)
     self.conv_atten = nn.Conv2D(out_chan,
                                 out_chan,
                                 kernel_size=1,
                                 bias_attr=False)
     self.bn_atten = nn.BatchNorm(out_chan)
     self.sigmoid_atten = nn.Sigmoid()
Example #26
 def __init__(self, in_planes, out_planes, scale_factor=(1, 2, 2)):
     super(Upsample, self).__init__()
     self.scale_factor = scale_factor
     self.conv3d = nn.Conv3D(in_planes,
                             out_planes,
                             kernel_size=(3, 3, 3),
                             stride=(1, 1, 1),
                             padding=(1, 1, 1))
     self.bn = nn.BatchNorm(out_planes)
Example #27
    def __init__(self,
                 ch_in,
                 ch_out,
                 filter_size,
                 stride=1,
                 groups=1,
                 norm_type=None,
                 norm_groups=32,
                 norm_decay=0.,
                 freeze_norm=False,
                 act=None):
        super(ConvNormLayer, self).__init__()
        self.act = act
        norm_lr = 0. if freeze_norm else 1.
        if norm_type is not None:
            assert norm_type in ['bn', 'sync_bn', 'gn'],\
                "norm_type should be one of ['bn', 'sync_bn', 'gn'], but got {}".format(norm_type)
            param_attr = ParamAttr(
                initializer=Constant(1.0),
                learning_rate=norm_lr,
                regularizer=L2Decay(norm_decay),
            )
            bias_attr = ParamAttr(learning_rate=norm_lr,
                                  regularizer=L2Decay(norm_decay))
            global_stats = True if freeze_norm else False
            if norm_type in ['bn', 'sync_bn']:
                self.norm = nn.BatchNorm(
                    ch_out,
                    param_attr=param_attr,
                    bias_attr=bias_attr,
                    use_global_stats=global_stats,
                )
            elif norm_type == 'gn':
                self.norm = nn.GroupNorm(num_groups=norm_groups,
                                         num_channels=ch_out,
                                         weight_attr=param_attr,
                                         bias_attr=bias_attr)
            norm_params = self.norm.parameters()
            if freeze_norm:
                for param in norm_params:
                    param.stop_gradient = True
            conv_bias_attr = False
        else:
            conv_bias_attr = True
            self.norm = None

        self.conv = nn.Conv2D(
            in_channels=ch_in,
            out_channels=ch_out,
            kernel_size=filter_size,
            stride=stride,
            padding=(filter_size - 1) // 2,
            groups=groups,
            weight_attr=ParamAttr(initializer=Normal(mean=0., std=0.001)),
            bias_attr=conv_bias_attr)
Example #28
 def __init__(self, in_channels=3, num_classes=2):
     super().__init__()
     self.backbone = Vgg16Base(in_channels=in_channels)
     self.sa1 = SAM()
     self.sa2 = SAM()
     self.sa3 = SAM()
     self.sa4 = SAM()
     self.sa5 = SAM()
     # branch1
     self.ca1 = CAM(in_channels=1024, ratio=8)
     self.bn_ca1 = nn.BatchNorm(1024)
     self.o1_conv1 = CPBD(1024, 512)
     self.o1_conv2 = CPBD(512, 512)
     self.bn_sa1 = nn.BatchNorm(512)
     self.o1_conv3 = nn.Conv2D(512, num_classes, 1)
     self.trans_conv1 = nn.Conv2DTranspose(512,
                                           512,
                                           kernel_size=2,
                                           stride=2)
     # branch 2
     self.ca2 = CAM(in_channels=1536, ratio=8)
     self.bn_ca2 = nn.BatchNorm(1536)
     self.o2_conv1 = CPBD(1536, 512)
     self.o2_conv2 = CPBD(512, 256)
     self.o2_conv3 = CPBD(256, 256)
     self.bn_sa2 = nn.BatchNorm(256)
     self.o2_conv4 = nn.Conv2D(256, num_classes, 1)
     self.trans_conv2 = nn.Conv2DTranspose(256,
                                           256,
                                           kernel_size=2,
                                           stride=2)
     # branch 3
     self.ca3 = CAM(in_channels=768, ratio=8)
     self.o3_conv1 = CPBD(768, 256)
     self.o3_conv2 = CPBD(256, 128)
     self.o3_conv3 = CPBD(128, 128)
     self.bn_sa3 = nn.BatchNorm(128)
     self.o3_conv4 = nn.Conv2D(128, num_classes, 1)
     self.trans_conv3 = nn.Conv2DTranspose(128,
                                           128,
                                           kernel_size=2,
                                           stride=2)
     # branch 4
     self.ca4 = CAM(in_channels=384, ratio=8)
     self.o4_conv1 = CPBD(384, 128)
     self.o4_conv2 = CPBD(128, 64)
     self.o4_conv3 = CPBD(64, 64)
     self.bn_sa4 = nn.BatchNorm(64)
     self.o4_conv4 = nn.Conv2D(64, num_classes, 1)
     self.trans_conv4 = nn.Conv2DTranspose(64, 64, kernel_size=2, stride=2)
     # branch 5
     self.ca5 = CAM(in_channels=192, ratio=8)
     self.o5_conv1 = CPBD(192, 64)
     self.o5_conv2 = CPBD(64, 32)
     self.o5_conv3 = CPBD(32, 16)
     self.bn_sa5 = nn.BatchNorm(16)
     self.o5_conv4 = nn.Conv2D(16, num_classes, 1)
Example #29
    def __init__(self,
                 ch_in,
                 ch_out,
                 filter_size,
                 stride=1,
                 norm_type='bn',
                 norm_groups=32,
                 use_dcn=False,
                 norm_decay=0.,
                 freeze_norm=False,
                 act=None,
                 name=None):
        super(ConvNormLayer, self).__init__()
        assert norm_type in ['bn', 'sync_bn', 'gn']

        self.act = act
        self.conv = nn.Conv2D(in_channels=ch_in,
                              out_channels=ch_out,
                              kernel_size=filter_size,
                              stride=stride,
                              padding=(filter_size - 1) // 2,
                              groups=1,
                              weight_attr=ParamAttr(name=name + "_weights",
                                                    initializer=Normal(
                                                        mean=0., std=0.01)),
                              bias_attr=False)

        norm_lr = 0. if freeze_norm else 1.

        norm_name = name + '_bn'
        param_attr = ParamAttr(name=norm_name + "_scale",
                               learning_rate=norm_lr,
                               regularizer=L2Decay(norm_decay))
        bias_attr = ParamAttr(name=norm_name + "_offset",
                              learning_rate=norm_lr,
                              regularizer=L2Decay(norm_decay))
        global_stats = True if freeze_norm else False
        if norm_type in ['bn', 'sync_bn']:
            self.norm = nn.BatchNorm(ch_out,
                                     param_attr=param_attr,
                                     bias_attr=bias_attr,
                                     use_global_stats=global_stats,
                                     moving_mean_name=norm_name + '_mean',
                                     moving_variance_name=norm_name +
                                     '_variance')
        elif norm_type == 'gn':
            self.norm = nn.GroupNorm(num_groups=norm_groups,
                                     num_channels=ch_out,
                                     weight_attr=param_attr,
                                     bias_attr=bias_attr)
        norm_params = self.norm.parameters()
        if freeze_norm:
            for param in norm_params:
                param.stop_gradient = True
Example #30
 def __init__(self, in_channels, out_channels):
     super(CPBD, self).__init__(
         nn.Conv2D(in_channels,
                   out_channels,
                   kernel_size=3,
                   stride=1,
                   padding=1),
         nn.PReLU(),
         nn.BatchNorm(out_channels),
         nn.Dropout(p=0.6),
     )
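CPBD passes its layers straight to the nn.Sequential constructor, so it needs no forward of its own: calling the module runs Conv2D -> PReLU -> BatchNorm -> Dropout in order. A minimal usage sketch (shapes are illustrative assumptions):

 # Hypothetical usage: the 3x3 conv with padding=1 keeps spatial size and remaps channels.
 block = CPBD(64, 32)
 y = block(paddle.randn([2, 64, 16, 16]))  # -> [2, 32, 16, 16]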