Example #1
 def __init__(self):
     super(RNet, self).__init__(name_scope='RNet')
     weight_attr = paddle.ParamAttr(
         regularizer=paddle.regularizer.L2Decay(0.0005))
     self.conv1 = nn.Conv2D(in_channels=3,
                            out_channels=28,
                            kernel_size=3,
                            padding='valid',
                            weight_attr=weight_attr)
     self.prelu1 = nn.PReLU()
     self.pool1 = nn.MaxPool2D(kernel_size=3, stride=2, padding='same')
     self.conv2 = nn.Conv2D(in_channels=28,
                            out_channels=48,
                            kernel_size=3,
                            padding='valid',
                            weight_attr=weight_attr)
     self.prelu2 = nn.PReLU()
     self.pool2 = nn.MaxPool2D(kernel_size=3, stride=2)
     self.conv3 = nn.Conv2D(in_channels=48,
                            out_channels=64,
                            kernel_size=2,
                            padding='valid',
                            weight_attr=weight_attr)
     self.prelu3 = nn.PReLU()
     self.flatten = nn.Flatten()
     self.fc = nn.Linear(in_features=576, out_features=128)
     self.class_fc = nn.Linear(in_features=128, out_features=2)
     self.bbox_fc = nn.Linear(in_features=128, out_features=4)
     self.landmark_fc = nn.Linear(in_features=128, out_features=10)
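A quick way to see where the 576 in self.fc comes from: assuming the standard 24x24 RNet input crop (an assumption, the snippet itself does not state the input size), the spatial size shrinks as follows:

import math

h = 24
h -= 2                    # conv1: 3x3, padding='valid' -> 22
h = math.ceil(h / 2)      # pool1: 3x3, stride 2, padding='same' -> 11
h -= 2                    # conv2: 3x3, padding='valid' -> 9
h = (h - 3) // 2 + 1      # pool2: 3x3, stride 2, no padding -> 4
h -= 1                    # conv3: 2x2, padding='valid' -> 3
print(64 * h * h)         # 576, matching in_features of self.fc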
Example #2
    def __init__(self):

        super(ONet, self).__init__()

        self.features = nn.Sequential(
            OrderedDict([
                ('conv1', nn.Conv2D(3, 32, 3, 1)),
                ('prelu1', nn.PReLU(32)),
                ('pool1', nn.MaxPool2D(3, 2, ceil_mode=True)),
                ('conv2', nn.Conv2D(32, 64, 3, 1)),
                ('prelu2', nn.PReLU(64)),
                ('pool2', nn.MaxPool2D(3, 2, ceil_mode=True)),
                ('conv3', nn.Conv2D(64, 64, 3, 1)),
                ('prelu3', nn.PReLU(64)),
                ('pool3', nn.MaxPool2D(2, 2, ceil_mode=True)),
                ('conv4', nn.Conv2D(64, 128, 2, 1)),
                ('prelu4', nn.PReLU(128)),
                ('flatten', nn.Flatten()),
                ('conv5', nn.Linear(1152, 256)),
                ('drop5', nn.Dropout(0.25)),
                ('prelu5', nn.PReLU(256)),
            ]))

        self.conv6_1 = nn.Linear(256, 2)
        self.conv6_2 = nn.Linear(256, 4)
        self.conv6_3 = nn.Linear(256, 10)

        weights = np.load("./onet.npy", allow_pickle=True)[()]
        for n, p in self.named_parameters():
            # original PyTorch version: p.data = torch.FloatTensor(weights[n])
            p.data = paddle.to_tensor(weights[n])
Example #3
    def __init__(self):

        super(PNet, self).__init__()

        # suppose we have input with size HxW, then
        # after first layer: H - 2,
        # after pool: ceil((H - 2)/2),
        # after second conv: ceil((H - 2)/2) - 2,
        # after last conv: ceil((H - 2)/2) - 4,
        # and the same for W

        self.features = nn.Sequential(
            OrderedDict([('conv1', nn.Conv2D(3, 10, 3, 1)),
                         ('prelu1', nn.PReLU(10)),
                         ('pool1', nn.MaxPool2D(2, 2, ceil_mode=True)),
                         ('conv2', nn.Conv2D(10, 16, 3, 1)),
                         ('prelu2', nn.PReLU(16)),
                         ('conv3', nn.Conv2D(16, 32, 3, 1)),
                         ('prelu3', nn.PReLU(32))]))

        self.conv4_1 = nn.Conv2D(32, 2, 1, 1)
        self.conv4_2 = nn.Conv2D(32, 4, 1, 1)

        weights = np.load("./pnet.npy", allow_pickle=True)[()]
        for n, p in self.named_parameters():
            # original PyTorch version: p.data = torch.FloatTensor(weights[n])
            p.data = paddle.to_tensor(weights[n])
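The size comment above can be checked numerically; here is an illustrative trace for a hypothetical 12x12 input (values are an example, not from the source):

import math

h = 12
h -= 2                  # conv1: 3x3 -> 10
h = math.ceil(h / 2)    # pool1: 2x2, stride 2, ceil_mode=True -> 5
h -= 2                  # conv2: 3x3 -> 3
h -= 2                  # conv3: 3x3 -> 1
print(h)                # 1 == ceil((12 - 2) / 2) - 4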
Example #4
    def __init__(self, conv, scale, n_feats, bn=False, act=False, bias=True):
        m = []
        if (scale & (scale - 1)) == 0:  # Is scale = 2^n?
            for _ in range(int(math.log(scale, 2))):
                m.append(conv(n_feats, 4 * n_feats, 3, bias))
                m.append(nn.PixelShuffle(2))
                if bn: m.append(nn.BatchNorm2D(n_feats))

                if act == 'relu':
                    m.append(nn.ReLU())
                elif act == 'prelu':
                    m.append(nn.PReLU(n_feats))

        elif scale == 3:
            m.append(conv(n_feats, 9 * n_feats, 3, bias))
            m.append(nn.PixelShuffle(3))
            if bn: m.append(nn.BatchNorm2D(n_feats))

            if act == 'relu':
                m.append(nn.ReLU())
            elif act == 'prelu':
                m.append(nn.PReLU(n_feats))
        else:
            raise NotImplementedError

        super(Upsampler, self).__init__(*m)
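A minimal usage sketch for the block above, assuming Upsampler subclasses nn.Sequential as the super() call suggests; default_conv is a hypothetical helper (EDSR-style code usually passes such a factory), not part of the original snippet:

import paddle.nn as nn

def default_conv(in_channels, out_channels, kernel_size, bias=True):
    # hypothetical 'same'-padded convolution factory
    return nn.Conv2D(in_channels, out_channels, kernel_size,
                     padding=kernel_size // 2,
                     bias_attr=None if bias else False)

# scale=4 is a power of two, so the loop adds two (conv -> PixelShuffle(2)) stages
up = Upsampler(default_conv, scale=4, n_feats=64, act='prelu')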
Example #5
    def __init__(self, inp, oup, stride, expansion, data_format="NCHW"):
        super().__init__()
        self.connect = stride == 1 and inp == oup

        self.conv = nn.Sequential(
            # 1*1 conv
            nn.Conv2D(
                inp, inp * expansion, 1, 1, 0, bias_attr=False, data_format=data_format),
            nn.BatchNorm2D(inp * expansion, data_format=data_format),
            nn.PReLU(inp * expansion, data_format=data_format),

            # 3*3 depth wise conv
            nn.Conv2D(
                inp * expansion,
                inp * expansion,
                3,
                stride,
                1,
                groups=inp * expansion,
                bias_attr=False,
                data_format=data_format
            ),
            nn.BatchNorm2D(inp * expansion, data_format=data_format),
            nn.PReLU(inp * expansion, data_format=data_format),

            # 1*1 conv
            nn.Conv2D(
                inp * expansion, oup, 1, 1, 0, bias_attr=False, data_format=data_format),
            nn.BatchNorm2D(oup, data_format=data_format), )
Example #6
    def __init__(self, num_layers, mode='ir', opts=None):
        super(GradualStyleEncoder, self).__init__()
        assert num_layers in [50, 100, 152], 'num_layers should be 50, 100, or 152'
        assert mode in ['ir', 'ir_se'], 'mode should be ir or ir_se'
        blocks = get_blocks(num_layers)
        if mode == 'ir':
            unit_module = BottleneckIR
        elif mode == 'ir_se':
            unit_module = BottleneckIRSE
        self.input_layer = nn.Sequential(nn.Conv2D(opts.input_nc, 64, (3, 3), 1, 1, bias_attr=False),
                                         nn.BatchNorm2D(64),
                                         nn.PReLU(64))
        modules = []
        for block in blocks:
            for bottleneck in block:
                modules.append(unit_module(bottleneck.in_channel,
                                           bottleneck.depth,
                                           bottleneck.stride))
        self.body = nn.Sequential(*modules)

        self.styles = nn.LayerList()
        self.style_count = 18
        self.coarse_ind = 3
        self.middle_ind = 7
        for i in range(self.style_count):
            if i < self.coarse_ind:
                style = GradualStyleBlock(512, 512, 16)
            elif i < self.middle_ind:
                style = GradualStyleBlock(512, 512, 32)
            else:
                style = GradualStyleBlock(512, 512, 64)
            self.styles.append(style)
        self.latlayer1 = nn.Conv2D(256, 512, kernel_size=1, stride=1, padding=0)
        self.latlayer2 = nn.Conv2D(128, 512, kernel_size=1, stride=1, padding=0)
Example #7
 def __init__(self, num_layers, mode='ir', opts=None):
     super(BackboneEncoderUsingLastLayerIntoWPlus, self).__init__()
     print('Using BackboneEncoderUsingLastLayerIntoWPlus')
     assert num_layers in [50, 100, 152], 'num_layers should be 50, 100, or 152'
     assert mode in ['ir', 'ir_se'], 'mode should be ir or ir_se'
     blocks = get_blocks(num_layers)
     if mode == 'ir':
         unit_module = BottleneckIR
     elif mode == 'ir_se':
         unit_module = BottleneckIRSE
     self.input_layer = nn.Sequential(nn.Conv2D(opts.input_nc, 64, (3, 3), 1, 1, bias_attr=False),
                                      nn.BatchNorm2D(64),
                                      nn.PReLU(64))
     self.output_layer_2 = nn.Sequential(nn.BatchNorm2D(512),
                                         nn.AdaptiveAvgPool2D((7, 7)),
                                         Flatten(),
                                         nn.Linear(512 * 7 * 7, 512))
     self.linear = EqualLinear(512, 512 * 18, lr_mul=1)
     modules = []
     for block in blocks:
         for bottleneck in block:
             modules.append(unit_module(bottleneck.in_channel,
                                        bottleneck.depth,
                                        bottleneck.stride))
     self.body = nn.Sequential(*modules)
Example #8
    def __init__(self, num_classes=10):
        super(ImperativeLenet, self).__init__()
        self.features = nn.Sequential(
            nn.Conv2D(
                in_channels=1,
                out_channels=6,
                kernel_size=3,
                stride=1,
                padding=1,
                bias_attr=False),
            nn.BatchNorm2D(6),
            nn.ReLU(),
            nn.MaxPool2D(
                kernel_size=2, stride=2),
            nn.Conv2D(
                in_channels=6,
                out_channels=16,
                kernel_size=5,
                stride=1,
                padding=0),
            nn.BatchNorm2D(16),
            nn.PReLU(),
            nn.MaxPool2D(
                kernel_size=2, stride=2))

        self.fc = nn.Sequential(
            nn.Linear(
                in_features=400, out_features=120),
            nn.LeakyReLU(),
            nn.Linear(
                in_features=120, out_features=84),
            nn.Sigmoid(),
            nn.Linear(
                in_features=84, out_features=num_classes),
            nn.Softmax())
Example #9
 def __init__(self, channel, reduction=16):
     super(SEBlock, self).__init__()
     self.avg_pool = nn.AdaptiveAvgPool2D(1)
     self.fc = nn.Sequential(
         nn.Linear(channel, channel // reduction),
         nn.PReLU(),
         nn.Linear(channel // reduction, channel),
         nn.Sigmoid()
     )
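The example only shows the constructor; below is a minimal forward sketch of how these layers are typically combined in a squeeze-and-excitation block (an assumed companion method, not taken from the original source):

def forward(self, x):
    b, c, _, _ = x.shape
    y = self.avg_pool(x).reshape([b, c])    # squeeze: one value per channel
    y = self.fc(y).reshape([b, c, 1, 1])    # excitation: channel weights in (0, 1)
    return x * y                            # rescale the input feature map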
Example #10
 def __init__(self, in_feats, hidden, out_feats, num_hops, n_layers, dropout, input_drop):
     super(SIGN, self).__init__()
     self.dropout = nn.Dropout(dropout)
     self.prelu = nn.PReLU()
     self.inception_ffs = nn.LayerList()
     self.input_drop = input_drop
     for i in range(num_hops):
         self.inception_ffs.append(FeedForwardNet(in_feats, hidden, hidden, n_layers, dropout))
     self.project = FeedForwardNet(num_hops * hidden, hidden, out_feats, n_layers, dropout)
Example #11
 def __init__(self, in_channels, out_channels):
     super(CPBD, self).__init__(
         nn.Conv2D(in_channels,
                   out_channels,
                   kernel_size=3,
                   stride=1,
                   padding=1),
         nn.PReLU(),
         nn.BatchNorm(out_channels),
         nn.Dropout(p=0.6),
     )
Example #12
    def __init__(self, inp, oup, k, s, p, dw=False, linear=False, data_format="NCHW"):
        super().__init__()
        self.linear = linear
        if dw:
            self.conv = nn.Conv2D(
                inp, oup, k, s, p, groups=inp, bias_attr=False, data_format=data_format)
        else:
            self.conv = nn.Conv2D(inp, oup, k, s, p, bias_attr=False, data_format=data_format)

        self.bn = nn.BatchNorm2D(oup, data_format=data_format)
        if not linear:
            self.prelu = nn.PReLU(oup, data_format=data_format)
Example #13
 def __init__(self, inplanes, planes, stride=1, downsample=None, use_se=True):
     super(IRBlock, self).__init__()
     self.bn0 = nn.BatchNorm2D(inplanes)
     self.conv1 = conv3x3(inplanes, inplanes)
     self.bn1 = nn.BatchNorm2D(inplanes)
     self.prelu = nn.PReLU()
     self.conv2 = conv3x3(inplanes, planes, stride)
     self.bn2 = nn.BatchNorm2D(planes)
     self.downsample = downsample
     self.stride = stride
     self.use_se = use_se
     if self.use_se:
         self.se = SEBlock(planes)
Example #14
 def __init__(self):
     super(PNet, self).__init__(name_scope='PNet')
     weight_attr = paddle.ParamAttr(
         regularizer=paddle.regularizer.L2Decay(0.0005))
     self.conv1 = nn.Conv2D(in_channels=3,
                            out_channels=10,
                            kernel_size=3,
                            padding='valid',
                            weight_attr=weight_attr)
     self.prelu1 = nn.PReLU()
     self.pool1 = nn.MaxPool2D(kernel_size=2, stride=2, padding='same')
     self.conv2 = nn.Conv2D(in_channels=10,
                            out_channels=16,
                            kernel_size=3,
                            padding='valid',
                            weight_attr=weight_attr)
     self.prelu2 = nn.PReLU()
     self.conv3 = nn.Conv2D(in_channels=16,
                            out_channels=32,
                            kernel_size=3,
                            padding='valid',
                            weight_attr=weight_attr)
     self.prelu3 = nn.PReLU()
     self.conv4_1 = nn.Conv2D(in_channels=32,
                              out_channels=2,
                              kernel_size=1,
                              padding='valid',
                              weight_attr=weight_attr)
     self.conv4_2 = nn.Conv2D(in_channels=32,
                              out_channels=4,
                              kernel_size=1,
                              padding='valid',
                              weight_attr=weight_attr)
     self.conv4_3 = nn.Conv2D(in_channels=32,
                              out_channels=10,
                              kernel_size=1,
                              padding='valid',
                              weight_attr=weight_attr)
Example #15
 def __init__(self, in_channel, depth, stride):
     super(BottleneckIR, self).__init__()
     if in_channel == depth:
         self.shortcut_layer = nn.MaxPool2D(1, stride)
     else:
         self.shortcut_layer = nn.Sequential(
             nn.Conv2D(in_channel, depth, (1, 1), stride, bias_attr=False),
             nn.BatchNorm2D(depth)
         )
     self.res_layer = nn.Sequential(
         nn.BatchNorm2D(in_channel),
         nn.Conv2D(in_channel, depth, (3, 3), (1, 1), 1, bias_attr=False), nn.PReLU(depth),
         nn.Conv2D(depth, depth, (3, 3), stride, 1, bias_attr=False), nn.BatchNorm2D(depth)
     )
Example #16
def act_layer(act_type, inplace=False, neg_slope=0.2, n_prelu=1):
    # activation layer
    act = act_type.lower()
    if act == 'relu':
        layer = nn.ReLU()
    elif act == 'leakyrelu':
        layer = nn.LeakyReLU(neg_slope)  # paddle.nn.LeakyReLU has no inplace argument; `inplace` is kept in the signature for compatibility and ignored
    elif act == 'prelu':
        layer = nn.PReLU(num_parameters=n_prelu, init=neg_slope)
    elif act == 'swish':
        layer = nn.Swish()
    else:
        raise NotImplementedError('activation layer [%s] is not found' % act)
    return layer
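A small illustrative use of the factory above (the call values are examples, not from the source):

prelu = act_layer('prelu', n_prelu=64)   # PReLU with one learnable slope per channel
leaky = act_layer('leakyrelu')           # LeakyReLU with negative_slope=0.2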
Example #17
    def __init__(self):

        super(RNet, self).__init__()

        self.features = nn.Sequential(
            OrderedDict([('conv1', nn.Conv2D(3, 28, 3, 1)),
                         ('prelu1', nn.PReLU(28)),
                         ('pool1', nn.MaxPool2D(3, 2, ceil_mode=True)),
                         ('conv2', nn.Conv2D(28, 48, 3, 1)),
                         ('prelu2', nn.PReLU(48)),
                         ('pool2', nn.MaxPool2D(3, 2, ceil_mode=True)),
                         ('conv3', nn.Conv2D(48, 64, 2, 1)),
                         ('prelu3', nn.PReLU(64)), ('flatten', nn.Flatten()),
                         ('conv4', nn.Linear(576, 128)),
                         ('prelu4', nn.PReLU(128))]))

        self.conv5_1 = nn.Linear(128, 2)
        self.conv5_2 = nn.Linear(128, 4)

        weights = np.load("./rnet.npy", allow_pickle=True)[()]
        for n, p in self.named_parameters():
            # original PyTorch version: p.data = torch.FloatTensor(weights[n])
            p.data = paddle.to_tensor(weights[n])
Example #18
 def __init__(self, in_feats, hidden, out_feats, n_layers, dropout):
     super(FeedForwardNet, self).__init__()
     self.layers = nn.LayerList()
     self.n_layers = n_layers
     # weight_attr = paddle.framework.ParamAttr(name="linear_weight", initializer=paddle.nn.initializer.XavierNormal())
     # bias_attr = paddle.framework.ParamAttr(name="linear_bias", initializer=paddle.nn.initializer.XavierNormal())
     if n_layers == 1:
         self.layers.append(nn.Linear(in_feats, out_feats))
     else:
         self.layers.append(nn.Linear(in_feats, hidden))
         for i in range(n_layers - 2):
             self.layers.append(nn.Linear(hidden, hidden))
         self.layers.append(nn.Linear(hidden, out_feats))
     if self.n_layers > 1:
         self.prelu = nn.PReLU()
         self.dropout = nn.Dropout(p=dropout)
Example #19
 def __init__(self, block, layers, use_se=True):
     self.inplanes = 64
     self.use_se = use_se
     super(ResNetFace, self).__init__()
     self.conv1 = nn.Conv2D(3, 64, kernel_size=3, padding=1)
     self.bn1 = nn.BatchNorm2D(64)
     self.prelu = nn.PReLU()
     self.maxpool = nn.MaxPool2D(kernel_size=2, stride=2)
     self.layer1 = self._make_layer(block, 64, layers[0])
     self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
     self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
     self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
     self.bn4 = nn.BatchNorm2D(512)
     self.dropout = nn.Dropout()
     self.flatten = nn.Flatten()
     self.fc5 = nn.Linear(512 * 7 * 7, 512)
     self.bn5 = nn.BatchNorm1D(512)
Example #20
 def __init__(self, in_channels, out_channels):
     super().__init__()
     branch_channels = out_channels // 5
     remain_channels = out_channels - branch_channels * 4
     self.conv1 = nn.Conv2D(in_channels,
                            branch_channels,
                            3,
                            stride=2,
                            padding=1,
                            bias_attr=False)
     self.d_conv1 = nn.Conv2D(branch_channels,
                              remain_channels,
                              3,
                              padding=1,
                              bias_attr=False)
     self.d_conv2 = nn.Conv2D(branch_channels,
                              branch_channels,
                              3,
                              padding=2,
                              dilation=2,
                              bias_attr=False)
     self.d_conv4 = nn.Conv2D(branch_channels,
                              branch_channels,
                              3,
                              padding=4,
                              dilation=4,
                              bias_attr=False)
     self.d_conv8 = nn.Conv2D(branch_channels,
                              branch_channels,
                              3,
                              padding=8,
                              dilation=8,
                              bias_attr=False)
     self.d_conv16 = nn.Conv2D(branch_channels,
                               branch_channels,
                               3,
                               padding=16,
                               dilation=16,
                               bias_attr=False)
     self.bn = layers.SyncBatchNorm(out_channels)
     self.act = nn.PReLU(out_channels)
Example #21
 def __init__(self,
              inplanes,
              planes,
              stride=1,
              downsample=None,
              groups=1,
              base_width=64,
              dilation=1):
     super(IBasicBlock, self).__init__()
     if groups != 1 or base_width != 64:
         raise ValueError(
             'BasicBlock only supports groups=1 and base_width=64')
     if dilation > 1:
         raise NotImplementedError(
             "Dilation > 1 not supported in BasicBlock")
     self.bn1 = nn.BatchNorm2D(inplanes, epsilon=1e-05, momentum=0.1)
     self.conv1 = conv3x3(inplanes, planes)
     self.bn2 = nn.BatchNorm2D(planes, epsilon=1e-05, momentum=0.1)
     self.prelu = nn.PReLU(planes)
     self.conv2 = conv3x3(planes, planes, stride)
     self.bn3 = nn.BatchNorm2D(planes, epsilon=1e-05, momentum=0.1)
     self.downsample = downsample
     self.stride = stride
Example #22
 def __init__(self,
              in_channels,
              out_channels,
              branches=4,
              kernel_size_maximum=9,
              shortcut=True):
     super().__init__()
     if out_channels < in_channels:
         raise RuntimeError(
             "The out_channes for DownSampler should be bigger than in_channels, but got in_channles={}, out_channels={}"
             .format(in_channels, out_channels))
     self.eesp = EESP(in_channels,
                      out_channels - in_channels,
                      stride=2,
                      branches=branches,
                      kernel_size_maximum=kernel_size_maximum,
                      down_method='avg')
     self.avg = nn.AvgPool2D(kernel_size=3, padding=1, stride=2)
     if shortcut:
         self.shortcut_layer = nn.Sequential(
             layers.ConvBNPReLU(3, 3, 3, stride=1, bias_attr=False),
             layers.ConvBN(3, out_channels, 1, stride=1, bias_attr=False),
         )
     self._act = nn.PReLU()
Example #23
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride=1,
                 branches=4,
                 kernel_size_maximum=7,
                 down_method='esp'):
        super(EESP, self).__init__()
        if out_channels % branches != 0:
            raise RuntimeError(
                "The out_channes for EESP should be factorized by branches, but out_channels={} cann't be factorized by branches={}"
                .format(out_channels, branches))
        assert down_method in [
            'avg', 'esp'
        ], "The down_method for EESP only support 'avg' or 'esp', but got down_method={}".format(
            down_method)
        self.in_channels = in_channels
        self.stride = stride

        in_branch_channels = int(out_channels / branches)
        self.group_conv_in = layers.ConvBNPReLU(in_channels,
                                                in_branch_channels,
                                                1,
                                                stride=1,
                                                groups=branches,
                                                bias_attr=False)

        map_ksize_dilation = {
            3: 1,
            5: 2,
            7: 3,
            9: 4,
            11: 5,
            13: 6,
            15: 7,
            17: 8
        }
        self.kernel_sizes = []
        for i in range(branches):
            kernel_size = 3 + 2 * i
            kernel_size = kernel_size if kernel_size <= kernel_size_maximum else 3
            self.kernel_sizes.append(kernel_size)
        self.kernel_sizes.sort()

        self.spp_modules = nn.LayerList()
        for i in range(branches):
            dilation = map_ksize_dilation[self.kernel_sizes[i]]
            self.spp_modules.append(
                nn.Conv2D(in_branch_channels,
                          in_branch_channels,
                          kernel_size=3,
                          padding='same',
                          stride=stride,
                          dilation=dilation,
                          groups=in_branch_channels,
                          bias_attr=False))
        self.group_conv_out = layers.ConvBN(out_channels,
                                            out_channels,
                                            kernel_size=1,
                                            stride=1,
                                            groups=branches,
                                            bias_attr=False)
        self.bn_act = BNPReLU(out_channels)
        self._act = nn.PReLU()
        self.down_method = (down_method == 'avg')
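For instance, with branches=4 and the default kernel_size_maximum=7, the selection loop above yields kernel sizes [3, 3, 5, 7] and dilations [1, 1, 2, 3]; a hypothetical standalone trace of that logic:

branches, kernel_size_maximum = 4, 7
map_ksize_dilation = {3: 1, 5: 2, 7: 3, 9: 4}
ks = sorted(3 + 2 * i if 3 + 2 * i <= kernel_size_maximum else 3
            for i in range(branches))
print(ks)                                   # [3, 3, 5, 7]
print([map_ksize_dilation[k] for k in ks])  # [1, 1, 2, 3]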
Example #24
 def __init__(self, channels):
     super().__init__()
     self.bn = layers.SyncBatchNorm(channels)
     self.act = nn.PReLU(channels)
Example #25
    def func_test_layer_str(self):
        module = nn.ELU(0.2)
        self.assertEqual(str(module), 'ELU(alpha=0.2)')

        module = nn.CELU(0.2)
        self.assertEqual(str(module), 'CELU(alpha=0.2)')

        module = nn.GELU(True)
        self.assertEqual(str(module), 'GELU(approximate=True)')

        module = nn.Hardshrink()
        self.assertEqual(str(module), 'Hardshrink(threshold=0.5)')

        module = nn.Hardswish(name="Hardswish")
        self.assertEqual(str(module), 'Hardswish(name=Hardswish)')

        module = nn.Tanh(name="Tanh")
        self.assertEqual(str(module), 'Tanh(name=Tanh)')

        module = nn.Hardtanh(name="Hardtanh")
        self.assertEqual(str(module),
                         'Hardtanh(min=-1.0, max=1.0, name=Hardtanh)')

        module = nn.PReLU(1, 0.25, name="PReLU", data_format="NCHW")
        self.assertEqual(
            str(module),
            'PReLU(num_parameters=1, data_format=NCHW, init=0.25, dtype=float32, name=PReLU)'
        )

        module = nn.ReLU()
        self.assertEqual(str(module), 'ReLU()')

        module = nn.ReLU6()
        self.assertEqual(str(module), 'ReLU6()')

        module = nn.SELU()
        self.assertEqual(
            str(module),
            'SELU(scale=1.0507009873554805, alpha=1.6732632423543772)')

        module = nn.LeakyReLU()
        self.assertEqual(str(module), 'LeakyReLU(negative_slope=0.01)')

        module = nn.Sigmoid()
        self.assertEqual(str(module), 'Sigmoid()')

        module = nn.Hardsigmoid()
        self.assertEqual(str(module), 'Hardsigmoid()')

        module = nn.Softplus()
        self.assertEqual(str(module), 'Softplus(beta=1, threshold=20)')

        module = nn.Softshrink()
        self.assertEqual(str(module), 'Softshrink(threshold=0.5)')

        module = nn.Softsign()
        self.assertEqual(str(module), 'Softsign()')

        module = nn.Swish()
        self.assertEqual(str(module), 'Swish()')

        module = nn.Tanhshrink()
        self.assertEqual(str(module), 'Tanhshrink()')

        module = nn.ThresholdedReLU()
        self.assertEqual(str(module), 'ThresholdedReLU(threshold=1.0)')

        module = nn.LogSigmoid()
        self.assertEqual(str(module), 'LogSigmoid()')

        module = nn.Softmax()
        self.assertEqual(str(module), 'Softmax(axis=-1)')

        module = nn.LogSoftmax()
        self.assertEqual(str(module), 'LogSoftmax(axis=-1)')

        module = nn.Maxout(groups=2)
        self.assertEqual(str(module), 'Maxout(groups=2, axis=1)')

        module = nn.Linear(2, 4, name='linear')
        self.assertEqual(
            str(module),
            'Linear(in_features=2, out_features=4, dtype=float32, name=linear)'
        )

        module = nn.Upsample(size=[12, 12])
        self.assertEqual(
            str(module),
            'Upsample(size=[12, 12], mode=nearest, align_corners=False, align_mode=0, data_format=NCHW)'
        )

        module = nn.UpsamplingNearest2D(size=[12, 12])
        self.assertEqual(
            str(module),
            'UpsamplingNearest2D(size=[12, 12], data_format=NCHW)')

        module = nn.UpsamplingBilinear2D(size=[12, 12])
        self.assertEqual(
            str(module),
            'UpsamplingBilinear2D(size=[12, 12], data_format=NCHW)')

        module = nn.Bilinear(in1_features=5, in2_features=4, out_features=1000)
        self.assertEqual(
            str(module),
            'Bilinear(in1_features=5, in2_features=4, out_features=1000, dtype=float32)'
        )

        module = nn.Dropout(p=0.5)
        self.assertEqual(str(module),
                         'Dropout(p=0.5, axis=None, mode=upscale_in_train)')

        module = nn.Dropout2D(p=0.5)
        self.assertEqual(str(module), 'Dropout2D(p=0.5, data_format=NCHW)')

        module = nn.Dropout3D(p=0.5)
        self.assertEqual(str(module), 'Dropout3D(p=0.5, data_format=NCDHW)')

        module = nn.AlphaDropout(p=0.5)
        self.assertEqual(str(module), 'AlphaDropout(p=0.5)')

        module = nn.Pad1D(padding=[1, 2], mode='constant')
        self.assertEqual(
            str(module),
            'Pad1D(padding=[1, 2], mode=constant, value=0.0, data_format=NCL)')

        module = nn.Pad2D(padding=[1, 0, 1, 2], mode='constant')
        self.assertEqual(
            str(module),
            'Pad2D(padding=[1, 0, 1, 2], mode=constant, value=0.0, data_format=NCHW)'
        )

        module = nn.ZeroPad2D(padding=[1, 0, 1, 2])
        self.assertEqual(str(module),
                         'ZeroPad2D(padding=[1, 0, 1, 2], data_format=NCHW)')

        module = nn.Pad3D(padding=[1, 0, 1, 2, 0, 0], mode='constant')
        self.assertEqual(
            str(module),
            'Pad3D(padding=[1, 0, 1, 2, 0, 0], mode=constant, value=0.0, data_format=NCDHW)'
        )

        module = nn.CosineSimilarity(axis=0)
        self.assertEqual(str(module), 'CosineSimilarity(axis=0, eps=1e-08)')

        module = nn.Embedding(10, 3, sparse=True)
        self.assertEqual(str(module), 'Embedding(10, 3, sparse=True)')

        module = nn.Conv1D(3, 2, 3)
        self.assertEqual(str(module),
                         'Conv1D(3, 2, kernel_size=[3], data_format=NCL)')

        module = nn.Conv1DTranspose(2, 1, 2)
        self.assertEqual(
            str(module),
            'Conv1DTranspose(2, 1, kernel_size=[2], data_format=NCL)')

        module = nn.Conv2D(4, 6, (3, 3))
        self.assertEqual(str(module),
                         'Conv2D(4, 6, kernel_size=[3, 3], data_format=NCHW)')

        module = nn.Conv2DTranspose(4, 6, (3, 3))
        self.assertEqual(
            str(module),
            'Conv2DTranspose(4, 6, kernel_size=[3, 3], data_format=NCHW)')

        module = nn.Conv3D(4, 6, (3, 3, 3))
        self.assertEqual(
            str(module),
            'Conv3D(4, 6, kernel_size=[3, 3, 3], data_format=NCDHW)')

        module = nn.Conv3DTranspose(4, 6, (3, 3, 3))
        self.assertEqual(
            str(module),
            'Conv3DTranspose(4, 6, kernel_size=[3, 3, 3], data_format=NCDHW)')

        module = nn.PairwiseDistance()
        self.assertEqual(str(module), 'PairwiseDistance(p=2.0)')

        module = nn.InstanceNorm1D(2)
        self.assertEqual(str(module),
                         'InstanceNorm1D(num_features=2, epsilon=1e-05)')

        module = nn.InstanceNorm2D(2)
        self.assertEqual(str(module),
                         'InstanceNorm2D(num_features=2, epsilon=1e-05)')

        module = nn.InstanceNorm3D(2)
        self.assertEqual(str(module),
                         'InstanceNorm3D(num_features=2, epsilon=1e-05)')

        module = nn.GroupNorm(num_channels=6, num_groups=6)
        self.assertEqual(
            str(module),
            'GroupNorm(num_groups=6, num_channels=6, epsilon=1e-05)')

        module = nn.LayerNorm([2, 2, 3])
        self.assertEqual(
            str(module),
            'LayerNorm(normalized_shape=[2, 2, 3], epsilon=1e-05)')

        module = nn.BatchNorm1D(1)
        self.assertEqual(
            str(module),
            'BatchNorm1D(num_features=1, momentum=0.9, epsilon=1e-05, data_format=NCL)'
        )

        module = nn.BatchNorm2D(1)
        self.assertEqual(
            str(module),
            'BatchNorm2D(num_features=1, momentum=0.9, epsilon=1e-05)')

        module = nn.BatchNorm3D(1)
        self.assertEqual(
            str(module),
            'BatchNorm3D(num_features=1, momentum=0.9, epsilon=1e-05, data_format=NCDHW)'
        )

        module = nn.SyncBatchNorm(2)
        self.assertEqual(
            str(module),
            'SyncBatchNorm(num_features=2, momentum=0.9, epsilon=1e-05)')

        module = nn.LocalResponseNorm(size=5)
        self.assertEqual(
            str(module),
            'LocalResponseNorm(size=5, alpha=0.0001, beta=0.75, k=1.0)')

        module = nn.AvgPool1D(kernel_size=2, stride=2, padding=0)
        self.assertEqual(str(module),
                         'AvgPool1D(kernel_size=2, stride=2, padding=0)')

        module = nn.AvgPool2D(kernel_size=2, stride=2, padding=0)
        self.assertEqual(str(module),
                         'AvgPool2D(kernel_size=2, stride=2, padding=0)')

        module = nn.AvgPool3D(kernel_size=2, stride=2, padding=0)
        self.assertEqual(str(module),
                         'AvgPool3D(kernel_size=2, stride=2, padding=0)')

        module = nn.MaxPool1D(kernel_size=2, stride=2, padding=0)
        self.assertEqual(str(module),
                         'MaxPool1D(kernel_size=2, stride=2, padding=0)')

        module = nn.MaxPool2D(kernel_size=2, stride=2, padding=0)
        self.assertEqual(str(module),
                         'MaxPool2D(kernel_size=2, stride=2, padding=0)')

        module = nn.MaxPool3D(kernel_size=2, stride=2, padding=0)
        self.assertEqual(str(module),
                         'MaxPool3D(kernel_size=2, stride=2, padding=0)')

        module = nn.AdaptiveAvgPool1D(output_size=16)
        self.assertEqual(str(module), 'AdaptiveAvgPool1D(output_size=16)')

        module = nn.AdaptiveAvgPool2D(output_size=3)
        self.assertEqual(str(module), 'AdaptiveAvgPool2D(output_size=3)')

        module = nn.AdaptiveAvgPool3D(output_size=3)
        self.assertEqual(str(module), 'AdaptiveAvgPool3D(output_size=3)')

        module = nn.AdaptiveMaxPool1D(output_size=16, return_mask=True)
        self.assertEqual(
            str(module), 'AdaptiveMaxPool1D(output_size=16, return_mask=True)')

        module = nn.AdaptiveMaxPool2D(output_size=3, return_mask=True)
        self.assertEqual(str(module),
                         'AdaptiveMaxPool2D(output_size=3, return_mask=True)')

        module = nn.AdaptiveMaxPool3D(output_size=3, return_mask=True)
        self.assertEqual(str(module),
                         'AdaptiveMaxPool3D(output_size=3, return_mask=True)')

        module = nn.SimpleRNNCell(16, 32)
        self.assertEqual(str(module), 'SimpleRNNCell(16, 32)')

        module = nn.LSTMCell(16, 32)
        self.assertEqual(str(module), 'LSTMCell(16, 32)')

        module = nn.GRUCell(16, 32)
        self.assertEqual(str(module), 'GRUCell(16, 32)')

        module = nn.PixelShuffle(3)
        self.assertEqual(str(module), 'PixelShuffle(upscale_factor=3)')

        module = nn.SimpleRNN(16, 32, 2)
        self.assertEqual(
            str(module),
            'SimpleRNN(16, 32, num_layers=2\n  (0): RNN(\n    (cell): SimpleRNNCell(16, 32)\n  )\n  (1): RNN(\n    (cell): SimpleRNNCell(32, 32)\n  )\n)'
        )

        module = nn.LSTM(16, 32, 2)
        self.assertEqual(
            str(module),
            'LSTM(16, 32, num_layers=2\n  (0): RNN(\n    (cell): LSTMCell(16, 32)\n  )\n  (1): RNN(\n    (cell): LSTMCell(32, 32)\n  )\n)'
        )

        module = nn.GRU(16, 32, 2)
        self.assertEqual(
            str(module),
            'GRU(16, 32, num_layers=2\n  (0): RNN(\n    (cell): GRUCell(16, 32)\n  )\n  (1): RNN(\n    (cell): GRUCell(32, 32)\n  )\n)'
        )

        module1 = nn.Sequential(
            ('conv1', nn.Conv2D(1, 20, 5)), ('relu1', nn.ReLU()),
            ('conv2', nn.Conv2D(20, 64, 5)), ('relu2', nn.ReLU()))
        self.assertEqual(
            str(module1),
            'Sequential(\n  '\
            '(conv1): Conv2D(1, 20, kernel_size=[5, 5], data_format=NCHW)\n  '\
            '(relu1): ReLU()\n  '\
            '(conv2): Conv2D(20, 64, kernel_size=[5, 5], data_format=NCHW)\n  '\
            '(relu2): ReLU()\n)'
        )

        module2 = nn.Sequential(
            nn.Conv3DTranspose(4, 6, (3, 3, 3)),
            nn.AvgPool3D(kernel_size=2, stride=2, padding=0),
            nn.Tanh(name="Tanh"), module1, nn.Conv3D(4, 6, (3, 3, 3)),
            nn.MaxPool3D(kernel_size=2, stride=2, padding=0), nn.GELU(True))
        self.assertEqual(
            str(module2),
            'Sequential(\n  '\
            '(0): Conv3DTranspose(4, 6, kernel_size=[3, 3, 3], data_format=NCDHW)\n  '\
            '(1): AvgPool3D(kernel_size=2, stride=2, padding=0)\n  '\
            '(2): Tanh(name=Tanh)\n  '\
            '(3): Sequential(\n    (conv1): Conv2D(1, 20, kernel_size=[5, 5], data_format=NCHW)\n    (relu1): ReLU()\n'\
            '    (conv2): Conv2D(20, 64, kernel_size=[5, 5], data_format=NCHW)\n    (relu2): ReLU()\n  )\n  '\
            '(4): Conv3D(4, 6, kernel_size=[3, 3, 3], data_format=NCDHW)\n  '\
            '(5): MaxPool3D(kernel_size=2, stride=2, padding=0)\n  '\
            '(6): GELU(approximate=True)\n)'
        )
Example #26
    def __init__(self,
                 block,
                 layers,
                 dropout=0,
                 num_features=512,
                 zero_init_residual=False,
                 groups=1,
                 width_per_group=64,
                 replace_stride_with_dilation=None,
                 fp16=False):
        super(IResNet, self).__init__()
        self.fp16 = fp16
        self.inplanes = 64
        self.dilation = 1
        if replace_stride_with_dilation is None:
            replace_stride_with_dilation = [False, False, False]
        if len(replace_stride_with_dilation) != 3:
            raise ValueError("replace_stride_with_dilation should be None "
                             "or a 3-element tuple, got {}".format(
                                 replace_stride_with_dilation))
        self.groups = groups
        self.base_width = width_per_group
        self.conv1 = nn.Conv2D(3,
                               self.inplanes,
                               kernel_size=3,
                               stride=1,
                               padding=1,
                               bias_attr=False)
        self.bn1 = nn.BatchNorm2D(self.inplanes, epsilon=1e-05, momentum=0.1)
        self.prelu = nn.PReLU(self.inplanes)
        self.layer1 = self._make_layer(block, 64, layers[0], stride=2)
        self.layer2 = self._make_layer(block,
                                       128,
                                       layers[1],
                                       stride=2,
                                       dilate=replace_stride_with_dilation[0])
        self.layer3 = self._make_layer(block,
                                       256,
                                       layers[2],
                                       stride=2,
                                       dilate=replace_stride_with_dilation[1])
        self.layer4 = self._make_layer(block,
                                       512,
                                       layers[3],
                                       stride=2,
                                       dilate=replace_stride_with_dilation[2])
        self.bn2 = nn.BatchNorm2D(512 * block.expansion,
                                  epsilon=1e-05,
                                  momentum=0.1)
        self.dropout = nn.Dropout(p=dropout)
        self.fc = nn.Linear(512 * block.expansion * self.fc_scale,
                            num_features)
        self.features = nn.BatchNorm1D(num_features,
                                       momentum=0.1,
                                       epsilon=1e-05)
        self.features.weight = paddle.create_parameter(
            shape=self.features.weight.shape,
            dtype='float32',
            default_initializer=nn.initializer.Constant(value=1.0))
        # nn.init.constant_(self.features.weight, 1.0)
        # changed stop_gradient from True to False so the BatchNorm weight stays trainable
        self.features.weight.stop_gradient = False
        #self.features.weight.requires_grad = False

        for m in self.sublayers():
            if isinstance(m, nn.Conv2D):
                m.weight = paddle.create_parameter(
                    shape=m.weight.shape,
                    dtype='float32',
                    default_initializer=nn.initializer.Normal(mean=0.0,
                                                              std=0.1))
                # nn.init.normal_(m.weight, 0, 0.1)
            elif isinstance(m, (nn.BatchNorm2D, nn.GroupNorm)):
                m.weight = paddle.create_parameter(
                    shape=m.weight.shape,
                    dtype='float32',
                    default_initializer=nn.initializer.Constant(value=1.0))
                m.bias = paddle.create_parameter(
                    shape=m.bias.shape,
                    dtype='float32',
                    default_initializer=nn.initializer.Constant(value=0.0))
                # nn.init.constant_(m.weight, 1)
                # nn.init.constant_(m.bias, 0)

        if zero_init_residual:
            for m in self.sublayers():
                if isinstance(m, IBasicBlock):
                    m.bn2.weight = paddle.create_parameter(
                        shape=m.bn2.weight.shape,
                        dtype='float32',
                        default_initializer=nn.initializer.Constant(value=0.0))