Example #1
    def __init__(self, inp, oup, k, s, p, dw=False, linear=False, data_format="NCHW"):
        super().__init__()
        self.linear = linear
        if dw:
            self.conv = nn.Conv2D(
                inp, oup, k, s, p, groups=inp, bias_attr=False, data_format=data_format)
        else:
            self.conv = nn.Conv2D(inp, oup, k, s, p, bias_attr=False, data_format=data_format)

        self.bn = nn.BatchNorm2D(oup, data_format=data_format)
        if not linear:
            self.prelu = nn.PReLU(oup, data_format=data_format)
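A minimal usage sketch (the class line is omitted above, so ConvBlock is an assumed name): dw=True makes the conv depthwise, and linear=True drops the PReLU.

# usage sketch; ConvBlock is a hypothetical name for the class above
dw_block = ConvBlock(32, 32, k=3, s=1, p=1, dw=True)      # depthwise 3x3
pw_block = ConvBlock(32, 64, k=1, s=1, p=0, linear=True)  # pointwise, no activation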
Example #2
    def __init__(self, extract_embedding: bool = True):

        super(CNN6, self).__init__()
        # emb_size is referenced below but missing from the snippet;
        # 512 (the width of the last conv block) is the assumed value
        self.emb_size = 512
        self.bn0 = nn.BatchNorm2D(64)
        self.conv_block1 = ConvBlock5x5(in_channels=1, out_channels=64)
        self.conv_block2 = ConvBlock5x5(in_channels=64, out_channels=128)
        self.conv_block3 = ConvBlock5x5(in_channels=128, out_channels=256)
        self.conv_block4 = ConvBlock5x5(in_channels=256, out_channels=512)

        self.fc1 = nn.Linear(512, self.emb_size)
        self.fc_audioset = nn.Linear(self.emb_size, 527)
        self.extract_embedding = extract_embedding
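ConvBlock5x5 is not defined in the snippet (and 527 is the AudioSet tag count the head predicts). In PANNs-style models this block is typically a 5x5 conv + BN + ReLU with pooling applied in forward; a minimal sketch under that assumption:

import paddle.nn as nn
import paddle.nn.functional as F

class ConvBlock5x5(nn.Layer):
    # assumed structure, not taken from the snippet above
    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.conv = nn.Conv2D(in_channels, out_channels,
                              kernel_size=5, padding=2, bias_attr=False)
        self.bn = nn.BatchNorm2D(out_channels)

    def forward(self, x, pool_size=(2, 2)):
        x = F.relu(self.bn(self.conv(x)))
        return F.avg_pool2d(x, kernel_size=pool_size)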
Example #3
    def _make_layer(self, planes, blocks, stride):
        downsample = None
        if stride != [1, 1] or self.inplanes != planes:
            downsample = nn.Sequential(conv1x1(self.inplanes, planes, stride),
                                       nn.BatchNorm2D(planes))

        layers = []
        layers.append(AsterBlock(self.inplanes, planes, stride, downsample))
        self.inplanes = planes
        for _ in range(1, blocks):
            layers.append(AsterBlock(self.inplanes, planes))
        return nn.Sequential(*layers)
Example #4
    def _make_layer(self, block, planes, blocks, stride=1):
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2D(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride),
                nn.BatchNorm2D(planes * block.expansion),)
        layers = [block(self.inplanes, planes, stride, downsample, use_se=self.use_se)]
        self.inplanes = planes
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, use_se=self.use_se))

        return nn.Sequential(*layers)
Example #5
 def __init__(self, inplanes, planes, stride=1, downsample=None):
     super(Bottleneck, self).__init__()
     self.bn1 = nn.BatchNorm2D(inplanes)
     self.conv1 = nn.Conv2D(inplanes,
                            planes,
                            kernel_size=1,
                            bias_attr=False)
     self.bn2 = nn.BatchNorm2D(planes)
     self.conv2 = nn.Conv2D(planes, (planes * 1),
                            kernel_size=3,
                            stride=stride,
                            padding=1,
                            bias_attr=False)
     self.bn3 = nn.BatchNorm2D((planes * 1))
     self.conv3 = nn.Conv2D((planes * 1),
                            planes * Bottleneck.outchannel_ratio,
                            kernel_size=1,
                            bias_attr=False)
     self.bn4 = nn.BatchNorm2D(planes * Bottleneck.outchannel_ratio)
     self.relu = nn.ReLU()
     self.downsample = downsample
     self.stride = stride
Example #6
 def __init__(self, npoint, radius, nsample, in_channel, mlp, group_all):
     super(PointNetSetAbstraction, self).__init__()
     self.npoint = npoint
     self.radius = radius
     self.nsample = nsample
     # use LayerList (not a plain Python list) so the sublayers are registered
     # and their parameters are actually trained and saved
     self.mlp_convs = nn.LayerList()
     self.mlp_bns = nn.LayerList()
     last_channel = in_channel
     for out_channel in mlp:
         self.mlp_convs.append(nn.Conv2D(last_channel, out_channel, 1))
         self.mlp_bns.append(nn.BatchNorm2D(out_channel))
         last_channel = out_channel
     self.group_all = group_all
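A sketch of how these layers are usually consumed in forward (not shown in the snippet), assuming grouped points of shape [B, C, nsample, npoint]:

import paddle.nn.functional as F

def _apply_mlp(self, new_points):
    # conv -> bn -> relu for each (conv, bn) pair registered above
    for conv, bn in zip(self.mlp_convs, self.mlp_bns):
        new_points = F.relu(bn(conv(new_points)))
    return new_points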
Example #7
 def __init__(self, in_chan, out_chan):
     super(AttentionRefinementModule, self).__init__()
     self.conv = layers.ConvBNReLU(in_chan,
                                   out_chan,
                                   kernel_size=3,
                                   stride=1,
                                   padding=1)
     self.conv_atten = nn.Conv2D(out_chan,
                                 out_chan,
                                 kernel_size=1,
                                 bias_attr=None)
     self.bn_atten = nn.BatchNorm2D(out_chan)
     self.sigmoid_atten = nn.Sigmoid()
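A sketch of the usual forward for this module (not shown above): global-pool the features, squeeze them through the 1x1 conv, and use the sigmoid output as channel attention.

import paddle.nn.functional as F

def forward(self, x):
    feat = self.conv(x)
    atten = F.adaptive_avg_pool2d(feat, 1)  # [B, C, 1, 1]
    atten = self.sigmoid_atten(self.bn_atten(self.conv_atten(atten)))
    return feat * atten                     # channel-wise reweighting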
Example #8
def BatchNorm2d(num_features, eps=1e-05, momentum=0.9, affine=True):
    if not affine:
        weight_attr = False
        bias_attr = False
    else:
        weight_attr = None
        bias_attr = None
    batchnorm = nn.BatchNorm2D(num_features,
                               momentum=momentum,
                               epsilon=eps,
                               weight_attr=weight_attr,
                               bias_attr=bias_attr)
    return batchnorm
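A quick check of the affine=False path, which Paddle implements by creating non-trainable scale/shift parameters:

import paddle

bn = BatchNorm2d(64, affine=False)
x = paddle.randn([2, 64, 8, 8])
y = bn(x)                       # same shape as x
print(bn.weight.stop_gradient)  # True: scale/shift are frozen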
Example #9
 def _block(self, in_channels, out_channels, kernel_size, stride, padding):
     return nn.Sequential(
         nn.Conv2DTranspose(
             in_channels, out_channels, kernel_size, stride, padding, bias_attr=False,
             weight_attr=paddle.ParamAttr(initializer=conv_initializer())
         ),
         nn.BatchNorm2D(
             out_channels,
             weight_attr=paddle.ParamAttr(initializer=bn_initializer()),
             momentum=0.8
         ),
         nn.ReLU(),
     )
Example #10
    def __init__(self, inp, oup, reduction=32):
        super(CoordAttention, self).__init__()
        self.pool_h = nn.AdaptiveAvgPool2D((None, 1))
        self.pool_w = nn.AdaptiveAvgPool2D((1, None))

        mip = max(8, inp // reduction)

        self.conv1 = nn.Conv2D(inp, mip, kernel_size=1, stride=1, padding=0)
        self.bn1 = nn.BatchNorm2D(mip)
        self.act = H_Swish()

        self.conv_h = nn.Conv2D(mip, oup, kernel_size=1, stride=1, padding=0)
        self.conv_w = nn.Conv2D(mip, oup, kernel_size=1, stride=1, padding=0)
Example #11
 def __init__(self, num_classes):
     super(ResidualAttentionModel_56, self).__init__()
     self.conv1 = nn.Sequential(
         nn.Conv2D(3,
                   64,
                   kernel_size=7,
                   stride=2,
                   padding=3,
                   bias_attr=False), nn.BatchNorm2D(64), nn.ReLU())
     self.mpool1 = nn.MaxPool2D(kernel_size=3, stride=2, padding=1)
     self.residual_block1 = ResidualBlock(64, 256)
     self.attention_module1 = AttentionModule_stage1(256, 256)
     self.residual_block2 = ResidualBlock(256, 512, 2)
     self.attention_module2 = AttentionModule_stage2(512, 512)
     self.residual_block3 = ResidualBlock(512, 1024, 2)
     self.attention_module3 = AttentionModule_stage3(1024, 1024)
     self.residual_block4 = ResidualBlock(1024, 2048, 2)
     self.residual_block5 = ResidualBlock(2048, 2048)
     self.residual_block6 = ResidualBlock(2048, 2048)
     self.mpool2 = nn.Sequential(nn.BatchNorm2D(2048), nn.ReLU(),
                                 nn.AvgPool2D(kernel_size=7, stride=1))
     self.fc = nn.Linear(2048, num_classes)
Example #12
 def __init__(self, c_in, c_out, is_downsample=False):
     super(BasicBlock, self).__init__()
     self.is_downsample = is_downsample
     if is_downsample:
         self.conv1 = nn.Conv2D(c_in, c_out, 3, stride=2, padding=1, bias_attr=False)
     else:
         self.conv1 = nn.Conv2D(c_in, c_out, 3, stride=1, padding=1, bias_attr=False)
     self.bn1 = nn.BatchNorm2D(c_out)
     self.relu = nn.ReLU()
     self.conv2 = nn.Conv2D(c_out, c_out, 3, stride=1, padding=1, bias_attr=False)
     self.bn2 = nn.BatchNorm2D(c_out)
     if is_downsample:
         self.downsample = nn.Sequential(
             nn.Conv2D(c_in, c_out, 1, stride=2, bias_attr=False),
             nn.BatchNorm2D(c_out)
         )
     elif c_in != c_out:
         self.downsample = nn.Sequential(
             nn.Conv2D(c_in, c_out, 1, stride=1, bias_attr=False),
             nn.BatchNorm2D(c_out)
         )
         self.is_downsample = True
Example #13
    def __init__(self, in_channel_left, in_channel_right):
        super(MFR, self).__init__()
        self.conv0 = nn.Conv2D(in_channel_left, 256, 3, 1, 1)
        self.bn0 = nn.BatchNorm2D(256)
        self.conv1 = nn.Conv2D(in_channel_right, 256, 1)
        self.bn1 = nn.BatchNorm2D(256)

        self.conv2 = nn.Conv2D(256, 256, kernel_size=3, stride=1, padding=1)
        self.bn2 = nn.BatchNorm2D(256)

        self.conv13 = nn.Conv2D(256,
                                256,
                                kernel_size=(1, 3),
                                stride=1,
                                padding=(0, 1))
        self.bn13 = nn.BatchNorm2D(256)
        self.conv31 = nn.Conv2D(256,
                                256,
                                kernel_size=(3, 1),
                                stride=1,
                                padding=(1, 0))
        self.bn31 = nn.BatchNorm2D(256)
Example #14
 def __init__(self, in_channels, out_channels, kernel_size, residual):
     super(DlaRoot, self).__init__()
     self.conv = nn.Conv2D(
         in_channels,
         out_channels,
         1,
         stride=1,
         bias_attr=False,
         padding=(kernel_size - 1) // 2,
     )
     self.bn = nn.BatchNorm2D(out_channels)
     self.relu = nn.ReLU()
     self.residual = residual
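For reference, a sketch of how a DLA Root is typically called (assumed from the standard DLA implementation, not shown in the snippet): it fuses a list of child feature maps and optionally adds the first child back as a residual.

import paddle

def forward(self, *children):
    # fuse all child feature maps along channels, then 1x1 conv + BN
    x = self.bn(self.conv(paddle.concat(children, axis=1)))
    if self.residual:
        x = x + children[0]
    return self.relu(x)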
Example #15
    def __init__(self, in_dim, out_dim, kernel_size=4, stride=2, padding=1, use_dropout=False):
        super(Upsample, self).__init__()

        sequence = [
            nn.ReLU(),
            nn.Conv2DTranspose(in_dim, out_dim, kernel_size, stride, padding, bias_attr=False),
            nn.BatchNorm2D(out_dim)
        ]

        if use_dropout:
            sequence.append(nn.Dropout(p=0.5))

        self.layers = nn.Sequential(*sequence)
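With the default kernel_size=4, stride=2, padding=1, the transposed conv exactly doubles the spatial size: H_out = (H - 1) * 2 - 2 + 4 = 2H. A minimal shape check (calling .layers directly, since forward is not shown):

import paddle

up = Upsample(128, 64)
x = paddle.randn([1, 128, 16, 16])
print(up.layers(x).shape)  # [1, 64, 32, 32]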
Example #16
 def __init__(self, in_channel):
     super(SR, self).__init__()
     self.conv1 = nn.Conv2D(in_channel,
                            192,
                            kernel_size=3,
                            stride=1,
                            padding=1)
     self.bn1 = nn.BatchNorm2D(192)
     self.conv2 = nn.Conv2D(192,
                            192 * 2,
                            kernel_size=3,
                            stride=1,
                            padding=1)
Example #17
 def __init__(self,
              in_features,
              out_features,
              groups=1,
              kernel_size=3,
              padding=1):
     super(SameBlock2d, self).__init__()
     self.conv = nn.Conv2D(in_channels=in_features,
                           out_channels=out_features,
                           kernel_size=kernel_size,
                           padding=padding,
                           groups=groups)
     self.norm = nn.BatchNorm2D(out_features)
Example #18
 def __init__(self):
     super(Generator, self).__init__()
     self.conv_1 = nn.Conv2DTranspose(
         100, 512, 4, 1, 0,
         bias_attr=False,
         weight_attr=paddle.ParamAttr(name="g_dconv_weight_1_", initializer=conv_initializer))
     self.bn_1 = nn.BatchNorm2D(
         512,
         weight_attr=paddle.ParamAttr(name="g_1_bn_weight_", initializer=bn_initializer),
         momentum=0.8)
     self.conv_2 = nn.Conv2DTranspose(
         512, 256, 4, 2, 1,
         bias_attr=False,
         weight_attr=paddle.ParamAttr(name="g_dconv_weight_2_", initializer=conv_initializer))
     self.bn_2 = nn.BatchNorm2D(
         256,
         weight_attr=paddle.ParamAttr(name="g_2_bn_weight_", initializer=bn_initializer),
         momentum=0.8)
     self.conv_3 = nn.Conv2DTranspose(
         256, 128, 4, 2, 1,
         bias_attr=False,
         weight_attr=paddle.ParamAttr(name="g_dconv_weight_3_", initializer=conv_initializer))
     self.bn_3 = nn.BatchNorm2D(
         128,
         weight_attr=paddle.ParamAttr(name="g_3_bn_weight_", initializer=bn_initializer),
         momentum=0.8)
     self.conv_4 = nn.Conv2DTranspose(
         128, 64, 4, 2, 1,
         bias_attr=False,
         weight_attr=paddle.ParamAttr(name="g_dconv_weight_4_", initializer=conv_initializer))
     self.bn_4 = nn.BatchNorm2D(
         64,
         weight_attr=paddle.ParamAttr(name="g_4_bn_weight_", initializer=bn_initializer),
         momentum=0.8)
     self.conv_5 = nn.Conv2DTranspose(
         64, 3, 4, 2, 1,
         bias_attr=False,
         weight_attr=paddle.ParamAttr(name="g_dconv_weight_5_", initializer=conv_initializer))
     self.tanh = paddle.nn.Tanh()
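Shape walk for this DCGAN-style stack, assuming the usual [B, 100, 1, 1] noise input:

# conv_1 (4x4, stride 1, pad 0): [B, 100, 1, 1] -> [B, 512, 4, 4]
# conv_2 (4x4, stride 2, pad 1): -> [B, 256, 8, 8]
# conv_3: -> [B, 128, 16, 16]
# conv_4: -> [B, 64, 32, 32]
# conv_5 + tanh: -> [B, 3, 64, 64]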
Example #19
    def __init__(self, in_dim, out_dim, kernel_size=4, stride=2, padding=1):
        super(ConvBlock, self).__init__()

        self.layers = nn.Sequential(
            nn.Conv2D(in_dim,
                      out_dim,
                      kernel_size,
                      stride,
                      padding,
                      bias_attr=False),
            nn.BatchNorm2D(out_dim),
            nn.LeakyReLU(0.2)  # negative slope 0.2
        )
Example #20
    def __init__(self, inplanes, planes, stride=1):
        super().__init__()

        # all conv layers have stride 1. an avgpool is performed after the second convolution when stride > 1
        self.conv1 = nn.Conv2D(inplanes, planes, 1, bias_attr=False)
        self.bn1 = nn.BatchNorm2D(planes)

        self.conv2 = nn.Conv2D(planes, planes, 3, padding=1, bias_attr=False)
        self.bn2 = nn.BatchNorm2D(planes)

        self.avgpool = nn.AvgPool2D(stride) if stride > 1 else Identity()

        self.conv3 = nn.Conv2D(planes,
                               planes * self.expansion,
                               1,
                               bias_attr=False)
        self.bn3 = nn.BatchNorm2D(planes * self.expansion)

        self.relu = nn.ReLU()
        self.downsample = None
        self.stride = stride

        if stride > 1 or inplanes != planes * Bottleneck.expansion:
            # downsampling layer is prepended with an avgpool, and the subsequent convolution has stride 1;
            # Paddle's nn.Sequential accepts (name, layer) pairs directly, so no OrderedDict is needed
            self.downsample = nn.Sequential(
                ("-1", nn.AvgPool2D(stride)),
                ("0",
                 nn.Conv2D(inplanes,
                           planes * self.expansion,
                           1,
                           stride=1,
                           bias_attr=False)),
                ("1", nn.BatchNorm2D(planes * self.expansion)))
Example #21
    def __init__(self, channel):
        super(SRMLayer, self).__init__()

        self.cfc = self.create_parameter(
            shape=[channel, 2],
            default_initializer=nn.initializer.Assign(
                paddle.zeros([channel, 2])))

        self.bn = nn.BatchNorm2D(channel)
        self.activation = nn.Sigmoid()

        setattr(self.cfc, 'srm_param', True)
        setattr(self.bn.weight, 'srm_param', True)
        setattr(self.bn.bias, 'srm_param', True)
Example #22
 def __init__(self,
              in_features,
              out_features,
              kernel_size=3,
              padding=1,
              groups=1):
     super(DownBlock2d, self).__init__()
     self.conv = nn.Conv2D(in_channels=in_features,
                           out_channels=out_features,
                           kernel_size=kernel_size,
                           padding=padding,
                           groups=groups)
     self.norm = nn.BatchNorm2D(out_features)
     self.pool = nn.AvgPool2D(kernel_size=(2, 2))
Example #23
def conv3x3_block(in_channels, out_channels, stride=1):
    n = 3 * 3 * out_channels
    w = math.sqrt(2. / n)
    conv_layer = nn.Conv2D(
        in_channels,
        out_channels,
        kernel_size=3,
        stride=stride,
        padding=1,
        weight_attr=nn.initializer.Normal(
            mean=0.0, std=w),
        bias_attr=nn.initializer.Constant(0))
    block = nn.Sequential(conv_layer, nn.BatchNorm2D(out_channels), nn.ReLU())
    return block
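The Normal(std=sqrt(2/n)) weight init is He initialization for a 3x3 conv feeding a ReLU. A minimal usage sketch:

import paddle

block = conv3x3_block(64, 128, stride=2)
x = paddle.randn([1, 64, 56, 56])
print(block(x).shape)  # [1, 128, 28, 28]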
Example #24
 def __init__(
     self, in_channels, out_channels, kernel_size, stride, padding, groups=1
 ):
     super(ConvBN, self).__init__()
     self.conv = nn.Conv2D(
         in_channels=in_channels,
         out_channels=out_channels,
         kernel_size=kernel_size,
         stride=stride,
         padding=padding,
         groups=groups,
         bias_attr=False,
     )
     self.bn = nn.BatchNorm2D(num_features=out_channels)
Example #25
    def __init__(self, num_modules=1, end_relu=False, num_landmarks=98, fname_pretrained=None):
        super(FAN, self).__init__()
        self.num_modules = num_modules
        self.end_relu = end_relu

        # Base part
        self.conv1 = CoordConvTh(256, 256, True, False,
                                 in_channels=3, out_channels=64,
                                 kernel_size=7, stride=2, padding=3)
        self.bn1 = nn.BatchNorm2D(64)
        self.conv2 = ConvBlock(64, 128)
        self.conv3 = ConvBlock(128, 128)
        self.conv4 = ConvBlock(128, 256)

        # Stacking part
        self.add_sublayer('m0', HourGlass(1, 4, 256, first_one=True))
        self.add_sublayer('top_m_0', ConvBlock(256, 256))
        self.add_sublayer('conv_last0', nn.Conv2D(256, 256, 1, 1, 0))
        self.add_sublayer('bn_end0', nn.BatchNorm2D(256))
        self.add_sublayer('l0', nn.Conv2D(256, num_landmarks+1, 1, 1, 0))

        if fname_pretrained is not None:
            self.load_pretrained_weights(fname_pretrained)
Example #26
 def __init__(self,
              inplanes,
              planes,
              stride=1,
              downsample=None,
              groups=1,
              base_width=64,
              dilation=1):
     super(IBasicBlock, self).__init__()
     if groups != 1 or base_width != 64:
         raise ValueError(
             'BasicBlock only supports groups=1 and base_width=64')
     if dilation > 1:
         raise NotImplementedError(
             "Dilation > 1 not supported in BasicBlock")
     # note: Paddle's BatchNorm momentum keeps that fraction of the *running*
     # stats (default 0.9), the opposite of torch's convention, so torch's 0.1
     # corresponds to 0.9 here; 0.1 makes the running stats track each batch
     self.bn1 = nn.BatchNorm2D(inplanes, epsilon=1e-05, momentum=0.1)
     self.conv1 = conv3x3(inplanes, planes)
     self.bn2 = nn.BatchNorm2D(planes, epsilon=1e-05, momentum=0.1)
     self.prelu = nn.PReLU(planes)
     self.conv2 = conv3x3(planes, planes, stride)
     self.bn3 = nn.BatchNorm2D(planes, epsilon=1e-05, momentum=0.1)
     self.downsample = downsample
     self.stride = stride
Example #27
 def __init__(self,
              cin,
              cout,
              kernel_size,
              stride,
              padding,
              output_padding=0,
              *args,
              **kwargs):
     super().__init__(*args, **kwargs)
     self.conv_block = nn.Sequential(
         nn.Conv2DTranspose(cin, cout, kernel_size, stride, padding,
                            output_padding), nn.BatchNorm2D(cout))
     self.act = nn.ReLU()
Example #28
 def __init__(self, in_channels, channels, se_ratio=12):
     super(SE, self).__init__()
     self.avg_pool = nn.AdaptiveAvgPool2D(1)
     self.fc = nn.Sequential(
         nn.Conv2D(in_channels,
                   channels // se_ratio,
                   kernel_size=1,
                   padding=0),
         nn.BatchNorm2D(channels // se_ratio),
         nn.ReLU(),
         nn.Conv2D(channels // se_ratio, channels, kernel_size=1,
                   padding=0),
         nn.Sigmoid(),
     )
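A sketch of how the SE branch is typically applied (the forward is not shown): squeeze to [B, C, 1, 1], excite through the fc stack, then rescale the input.

def forward(self, x):
    w = self.fc(self.avg_pool(x))  # per-channel gates in (0, 1)
    return x * w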
Example #29
def make_layers(cfg, batch_norm=False):
    layers = []
    in_channels = 3
    for v in cfg:
        if v == 'M':
            layers += [nn.MaxPool2D(kernel_size=2, stride=2)]
        else:
            conv2d = nn.Conv2D(in_channels, v, kernel_size=3, padding=1)
            if batch_norm:
                layers += [conv2d, nn.BatchNorm2D(v), nn.ReLU()]
            else:
                layers += [conv2d, nn.ReLU()]
            in_channels = v
    return nn.Sequential(*layers)
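A usage sketch with a hypothetical small VGG-style config: integers are output channel counts, 'M' inserts a 2x2 max pool.

import paddle

features = make_layers([64, 'M', 128, 'M', 256, 256, 'M'], batch_norm=True)
x = paddle.randn([1, 3, 32, 32])
print(features(x).shape)  # [1, 256, 4, 4]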
Example #30
    def __init__(self):
        super(Discriminator, self).__init__()
        self.dis = nn.Sequential(

            # input [B, 1, 32, 32] -> [B, 64, 16, 16]
            nn.Conv2D(1, 64, 4, 2, 1, bias_attr=False),
            nn.LeakyReLU(0.2),

            # state size. [B, 64, 16, 16] -> [B, 128, 8, 8]
            nn.Conv2D(64, 64 * 2, 4, 2, 1, bias_attr=False),
            nn.BatchNorm2D(64 * 2),
            nn.LeakyReLU(0.2),

            # state size. [B, 128, 8, 8] -> [B, 256, 4, 4]
            nn.Conv2D(64 * 2, 64 * 4, 4, 2, 1, bias_attr=False),
            nn.BatchNorm2D(64 * 4),
            nn.LeakyReLU(0.2),

            # state size. [B, 256, 4, 4] -> [B, 1, 1, 1]
            nn.Conv2D(64 * 4, 1, 4, 1, 0, bias_attr=False),
            # this is the part that needs to be changed
            # nn.Sigmoid()
            nn.LeakyReLU())
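A quick shape check (calling .dis directly, since forward is not shown): the three stride-2 convs take 32x32 down to 4x4, and the final 4x4 conv collapses that to a single score map.

import paddle

d = Discriminator()
x = paddle.randn([4, 1, 32, 32])
print(d.dis(x).shape)  # [4, 1, 1, 1]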