Example #1
    def __init__(self, num_layers, mode='ir', opts=None):
        super(BackboneEncoderUsingLastLayerIntoWPlus, self).__init__()
        print('Using BackboneEncoderUsingLastLayerIntoWPlus')
        assert num_layers in [50, 100,
                              152], 'num_layers should be 50, 100, or 152'
        assert mode in ['ir', 'ir_se'], 'mode should be ir or ir_se'
        blocks = get_blocks(num_layers)
        if mode == 'ir':
            unit_module = bottleneck_IR
        elif mode == 'ir_se':
            unit_module = bottleneck_IR_SE
        self.input_layer = Sequential(
            Conv2D(opts.input_nc, 64, (3, 3), 1, 1, bias_attr=False),
            BatchNorm2D(64), PReLU(64))
        self.output_layer_2 = Sequential(BatchNorm2D(512),
                                         paddle.nn.AdaptiveAvgPool2D((7, 7)),
                                         Flatten(), Linear(512 * 7 * 7, 512))
        self.linear = EqualLinear(512, 512 * 18, lr_mul=1)
        modules = []
        for block in blocks:
            for bottleneck in block:
                modules.append(
                    unit_module(bottleneck.in_channel, bottleneck.depth,
                                bottleneck.stride))
        self.body = Sequential(*modules)
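A sketch of the forward pass this constructor implies, following the pSp encoder convention (the method itself is not part of the snippet, so treat it as an assumption): features are pooled into a single 512-d vector, which EqualLinear then expands into the 18 w+ style vectors of StyleGAN.

    def forward(self, x):
        # input stem -> IR body -> 7x7 pooling head -> one 512-d vector
        x = self.input_layer(x)
        x = self.body(x)
        x = self.output_layer_2(x)
        # expand to 18 style vectors of width 512 (the w+ space)
        x = self.linear(x)
        return paddle.reshape(x, [-1, 18, 512])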
Example #2
    def __init__(self,
                 inplanes,
                 planes,
                 stride=1,
                 downsample=None,
                 zero_init_residual=True):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = BatchNorm2D(
            planes,
            weight_attr=paddle.ParamAttr(
                regularizer=paddle.regularizer.L1Decay(0.0)),
            bias_attr=paddle.ParamAttr(
                regularizer=paddle.regularizer.L1Decay(0.0)))
        self.relu = ReLU()
        self.conv2 = conv3x3(planes, planes)
        if zero_init_residual:
            self.bn2 = BatchNorm2D(
                planes,
                weight_attr=paddle.ParamAttr(
                    initializer=nn.initializer.Constant(value=0.0),
                    regularizer=paddle.regularizer.L1Decay(0.0)),
                bias_attr=paddle.ParamAttr(
                    regularizer=paddle.regularizer.L1Decay(0.0)))
        else:
            self.bn2 = BatchNorm2D(
                planes,
                weight_attr=paddle.ParamAttr(
                    regularizer=paddle.regularizer.L1Decay(0.0)),
                bias_attr=paddle.ParamAttr(
                    regularizer=paddle.regularizer.L1Decay(0.0)))
        self.downsample = downsample
        self.stride = stride
Example #3
    def __init__(self, input_size, num_layers, out_dim, mode='ir'):
        super(Backbone, self).__init__()
        assert input_size[0] in [
            112, 224
        ], "input_size should be [112, 112] or [224, 224]"
        assert num_layers in [34, 50, 100,
                              152], "num_layers should be 34, 50, 100, or 152"
        assert mode in ['ir', 'ir_se'], "mode should be ir or ir_se"
        blocks = get_blocks(num_layers)
        if mode == 'ir':
            unit_module = BottleneckIR
        elif mode == 'ir_se':
            unit_module = BottleneckIRSE
        self.input_layer = Sequential(
            Conv2D(3, 64, (3, 3), 1, 1, bias_attr=False), BatchNorm2D(64),
            PReLU(64))
        if input_size[0] == 112:
            self.output_layer = Sequential(BatchNorm2D(512), Dropout(),
                                           Flatten(),
                                           Linear(512 * 7 * 7, out_dim),
                                           BatchNorm1D(out_dim))
        else:
            self.output_layer = Sequential(BatchNorm2D(512), Dropout(),
                                           Flatten(),
                                           Linear(512 * 14 * 14, out_dim),
                                           BatchNorm1D(out_dim))

        modules = []
        for block in blocks:
            for bottleneck in block:
                modules.append(
                    unit_module(bottleneck.in_channel, bottleneck.depth,
                                bottleneck.stride))
        self.body = Sequential(*modules)
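A usage note with an assumed forward pass (not included in the snippet): the IR body downsamples by a factor of 16, so a 112x112 input leaves a 7x7 feature map and a 224x224 input a 14x14 map, which is why the Linear head above switches between 512 * 7 * 7 and 512 * 14 * 14 input features.

    def forward(self, x):
        x = self.input_layer(x)
        x = self.body(x)
        return self.output_layer(x)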
Example #4
    def __init__(self, num_classes=10):
        super(ImperativeLenetWithSkipQuant, self).__init__()

        conv2d_w1_attr = fluid.ParamAttr(name="conv2d_w_1")
        conv2d_w2_attr = fluid.ParamAttr(name="conv2d_w_2")
        fc_w1_attr = fluid.ParamAttr(name="fc_w_1")
        fc_w2_attr = fluid.ParamAttr(name="fc_w_2")
        fc_w3_attr = fluid.ParamAttr(name="fc_w_3")
        conv2d_b1_attr = fluid.ParamAttr(name="conv2d_b_1")
        conv2d_b2_attr = fluid.ParamAttr(name="conv2d_b_2")
        fc_b1_attr = fluid.ParamAttr(name="fc_b_1")
        fc_b2_attr = fluid.ParamAttr(name="fc_b_2")
        fc_b3_attr = fluid.ParamAttr(name="fc_b_3")
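        # The skip_quant flags set below are read by Paddle's imperative
        # quantization tooling, not by this class itself: True means the layer
        # is left un-quantized, False means it is quantized as usual.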
        self.conv2d_0 = Conv2D(in_channels=1,
                               out_channels=6,
                               kernel_size=3,
                               stride=1,
                               padding=1,
                               weight_attr=conv2d_w1_attr,
                               bias_attr=conv2d_b1_attr)
        self.conv2d_0.skip_quant = True

        self.batch_norm_0 = BatchNorm2D(6)
        self.relu_0 = ReLU()
        self.pool2d_0 = MaxPool2D(kernel_size=2, stride=2)
        self.conv2d_1 = Conv2D(in_channels=6,
                               out_channels=16,
                               kernel_size=5,
                               stride=1,
                               padding=0,
                               weight_attr=conv2d_w2_attr,
                               bias_attr=conv2d_b2_attr)
        self.conv2d_1.skip_quant = False

        self.batch_norm_1 = BatchNorm2D(16)
        self.relu6_0 = ReLU6()
        self.pool2d_1 = MaxPool2D(kernel_size=2, stride=2)
        self.linear_0 = Linear(in_features=400,
                               out_features=120,
                               weight_attr=fc_w1_attr,
                               bias_attr=fc_b1_attr)
        self.linear_0.skip_quant = True

        self.leaky_relu_0 = LeakyReLU()
        self.linear_1 = Linear(in_features=120,
                               out_features=84,
                               weight_attr=fc_w2_attr,
                               bias_attr=fc_b2_attr)
        self.linear_1.skip_quant = False

        self.sigmoid_0 = Sigmoid()
        self.linear_2 = Linear(in_features=84,
                               out_features=num_classes,
                               weight_attr=fc_w3_attr,
                               bias_attr=fc_b3_attr)
        self.linear_2.skip_quant = False
        self.softmax_0 = Softmax()
Example #5
    def __init__(self, num_classes=10):
        super(ImperativeLenet, self).__init__()
        conv2d_w1_attr = fluid.ParamAttr(name="conv2d_w_1")
        conv2d_w2_attr = fluid.ParamAttr(name="conv2d_w_2")
        fc_w1_attr = fluid.ParamAttr(name="fc_w_1")
        fc_w2_attr = fluid.ParamAttr(name="fc_w_2")
        fc_w3_attr = fluid.ParamAttr(name="fc_w_3")
        conv2d_b2_attr = fluid.ParamAttr(name="conv2d_b_2")
        fc_b1_attr = fluid.ParamAttr(name="fc_b_1")
        fc_b2_attr = fluid.ParamAttr(name="fc_b_2")
        fc_b3_attr = fluid.ParamAttr(name="fc_b_3")
        self.features = Sequential(
            Conv2D(
                in_channels=1,
                out_channels=6,
                kernel_size=3,
                stride=1,
                padding=1,
                weight_attr=conv2d_w1_attr,
                bias_attr=False),
            BatchNorm2D(6),
            ReLU(),
            MaxPool2D(
                kernel_size=2, stride=2),
            Conv2D(
                in_channels=6,
                out_channels=16,
                kernel_size=5,
                stride=1,
                padding=0,
                weight_attr=conv2d_w2_attr,
                bias_attr=conv2d_b2_attr),
            BatchNorm2D(16),
            PReLU(),
            MaxPool2D(
                kernel_size=2, stride=2))

        self.fc = Sequential(
            Linear(
                in_features=400,
                out_features=120,
                weight_attr=fc_w1_attr,
                bias_attr=fc_b1_attr),
            LeakyReLU(),
            Linear(
                in_features=120,
                out_features=84,
                weight_attr=fc_w2_attr,
                bias_attr=fc_b2_attr),
            Sigmoid(),
            Linear(
                in_features=84,
                out_features=num_classes,
                weight_attr=fc_w3_attr,
                bias_attr=fc_b3_attr),
            Softmax())
Example #6
    def __init__(self, in_channel, depth, stride):
        super(bottleneck_IR, self).__init__()
        if in_channel == depth:
            self.shortcut_layer = MaxPool2D(1, stride)
        else:
            self.shortcut_layer = Sequential(
                Conv2D(in_channel, depth, (1, 1), stride, bias_attr=False),
                BatchNorm2D(depth))
        self.res_layer = Sequential(
            BatchNorm2D(in_channel),
            Conv2D(in_channel, depth, (3, 3), (1, 1), 1, bias_attr=False),
            PReLU(depth),
            Conv2D(depth, depth, (3, 3), stride, 1, bias_attr=False),
            BatchNorm2D(depth))
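A minimal sketch of the block's forward pass, assuming the usual IR residual pattern (the method is not shown in the snippet):

    def forward(self, x):
        shortcut = self.shortcut_layer(x)
        res = self.res_layer(x)
        # identity (or 1x1-projected) shortcut added to the residual branch
        return res + shortcut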
Example #7
    def __init__(self, input_size, block, layers, zero_init_residual=True):
        super(ResNet, self).__init__()
        assert input_size[0] in [
            112, 224
        ], "input_size should be [112, 112] or [224, 224]"
        self.inplanes = 64
        self.zero_init_residual = zero_init_residual
        self.conv1 = Conv2D(3,
                            64,
                            kernel_size=7,
                            stride=2,
                            padding=3,
                            weight_attr=paddle.ParamAttr(
                                initializer=nn.initializer.KaimingNormal()))
        self.bn1 = BatchNorm2D(
            64,
            weight_attr=paddle.ParamAttr(
                initializer=nn.initializer.Constant(value=1),
                regularizer=paddle.regularizer.L1Decay(0.0)),
            bias_attr=paddle.ParamAttr(
                initializer=nn.initializer.Constant(value=0),
                regularizer=paddle.regularizer.L1Decay(0.0)))
        self.relu = ReLU()
        self.maxpool = MaxPool2D(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)

        self.bn_o1 = BatchNorm2D(
            2048,
            weight_attr=paddle.ParamAttr(
                initializer=nn.initializer.Constant(value=0.0),
                regularizer=paddle.regularizer.L1Decay(0.0)),
            bias_attr=paddle.ParamAttr(
                regularizer=paddle.regularizer.L1Decay(0.0)))
        self.dropout = Dropout()
        if input_size[0] == 112:
            self.fc = Linear(2048 * 4 * 4, 512)
        else:
            self.fc = Linear(2048 * 8 * 8, 512)
        self.bn_o2 = BatchNorm1D(
            512,
            weight_attr=paddle.ParamAttr(
                initializer=nn.initializer.Constant(value=0.0),
                regularizer=paddle.regularizer.L1Decay(0.0)),
            bias_attr=paddle.ParamAttr(
                regularizer=paddle.regularizer.L1Decay(0.0)))
Example #8
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 padding,
                 groups=1,
                 act=None):
        super(ConvBNLayer, self).__init__()
        self._conv = Conv2D(in_channels=in_channels,
                            out_channels=out_channels,
                            kernel_size=kernel_size,
                            stride=stride,
                            padding=padding,
                            groups=groups,
                            weight_attr=ParamAttr(initializer=KaimingNormal()),
                            bias_attr=False)

        self._batch_norm = BatchNorm2D(
            out_channels,
            weight_attr=ParamAttr(regularizer=L2Decay(0.0)),
            bias_attr=ParamAttr(regularizer=L2Decay(0.0)))
        if act == "hard_swish":
            act = 'hardswish'
        self.act = act
Example #9
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 groups=1,
                 act=None,
                 name=None,
                 data_format="NCHW"):
        super(ConvBNLayer, self).__init__()
        self._conv = Conv2D(in_channels=in_channels,
                            out_channels=out_channels,
                            kernel_size=kernel_size,
                            stride=stride,
                            padding=(kernel_size - 1) // 2,
                            groups=groups,
                            weight_attr=ParamAttr(name=name + "_weights"),
                            bias_attr=False,
                            data_format=data_format)
        if name == "conv1":
            bn_name = "bn_" + name
        else:
            bn_name = "bn" + name[3:]

        self._act = act

        self._batch_norm = BatchNorm2D(
            out_channels,
            weight_attr=ParamAttr(name=bn_name + "_scale",
                                  regularizer=L2Decay(0.0)),
            bias_attr=ParamAttr(name=bn_name + "_offset",
                                regularizer=L2Decay(0.0)),
            data_format=data_format)
Example #10
    def __init__(self):
        super(ModelCase6, self).__init__()
        self.bn1 = BatchNorm2D(3)
        self.relu1 = ReLU()
        self.fc1 = paddle.nn.Linear(3 * 16 * 16, 3 * 16 * 16)
        self.dp = paddle.nn.Dropout(p=0.5)
        self.lstm = paddle.nn.LSTM(1536,
                                   10,
                                   direction='bidirectional',
                                   num_layers=2)
Example #11
    def __init__(self,
                 num_channels,
                 num_filters,
                 filter_size,
                 stride=1,
                 groups=1,
                 padding=0):
        super(ConvBNLayer, self).__init__()

        self._conv = Conv2D(in_channels=num_channels,
                            out_channels=num_filters,
                            kernel_size=filter_size,
                            stride=stride,
                            padding=padding,
                            groups=groups,
                            bias_attr=False)

        self._batch_norm = BatchNorm2D(num_filters)
Example #12
    def __init__(self, in_channels, out_channels):
        super(Decoder, self).__init__()

        self.relus = LayerList([ReLU() for i in range(2)])
        self.conv_transpose_01 = Conv2DTranspose(in_channels,
                                                 out_channels,
                                                 kernel_size=3,
                                                 padding=1)
        self.conv_transpose_02 = Conv2DTranspose(out_channels,
                                                 out_channels,
                                                 kernel_size=3,
                                                 padding=1)
        self.bns = LayerList([BatchNorm2D(out_channels) for i in range(2)])
        self.upsamples = LayerList(
            [Upsample(scale_factor=2.0) for i in range(2)])
        self.residual_conv = Conv2D(in_channels,
                                    out_channels,
                                    kernel_size=1,
                                    padding='same')
Example #13
    def __init__(self, num_layers, mode='ir', opts=None):
        super(GradualStyleEncoder, self).__init__()
        assert num_layers in [50, 100,
                              152], 'num_layers should be 50, 100, or 152'
        assert mode in ['ir', 'ir_se'], 'mode should be ir or ir_se'
        blocks = get_blocks(num_layers)
        if mode == 'ir':
            unit_module = bottleneck_IR
        elif mode == 'ir_se':
            unit_module = bottleneck_IR_SE
        self.input_layer = Sequential(
            Conv2D(opts.input_nc, 64, (3, 3), 1, 1, bias_attr=False),
            BatchNorm2D(64), PReLU(64))
        modules = []
        for block in blocks:
            for bottleneck in block:
                modules.append(
                    unit_module(bottleneck.in_channel, bottleneck.depth,
                                bottleneck.stride))
        self.body = Sequential(*modules)

        self.styles = nn.LayerList()
        self.style_count = 18
        self.coarse_ind = 3
        self.middle_ind = 7
        for i in range(self.style_count):
            if i < self.coarse_ind:
                style = GradualStyleBlock(512, 512, 16)
            elif i < self.middle_ind:
                style = GradualStyleBlock(512, 512, 32)
            else:
                style = GradualStyleBlock(512, 512, 64)
            self.styles.append(style)
        self.latlayer1 = nn.Conv2D(256,
                                   512,
                                   kernel_size=1,
                                   stride=1,
                                   padding=0)
        self.latlayer2 = nn.Conv2D(128,
                                   512,
                                   kernel_size=1,
                                   stride=1,
                                   padding=0)
Example #14
    def __init__(self, in_channels, out_channels):
        super(Encoder, self).__init__()

        self.relus = LayerList([ReLU() for i in range(2)])
        self.separable_conv_01 = SeparableConv2D(in_channels,
                                                 out_channels,
                                                 kernel_size=3,
                                                 padding='same')
        self.bns = LayerList([BatchNorm2D(out_channels) for i in range(2)])

        self.separable_conv_02 = SeparableConv2D(out_channels,
                                                 out_channels,
                                                 kernel_size=3,
                                                 padding='same')
        self.pool = MaxPool2D(kernel_size=3, stride=2, padding=1)
        self.residual_conv = Conv2D(in_channels,
                                    out_channels,
                                    kernel_size=1,
                                    stride=2,
                                    padding='same')
Example #15
    def __init__(self,
                 ch_in,
                 ch_out,
                 filter_size,
                 stride,
                 norm_type='bn',
                 norm_groups=32,
                 use_dcn=False,
                 norm_name=None,
                 name=None):
        super(ConvNormLayer, self).__init__()
        assert norm_type in ['bn', 'sync_bn', 'gn']

        self.conv = Conv2D(in_channels=ch_in,
                           out_channels=ch_out,
                           kernel_size=filter_size,
                           stride=stride,
                           padding=(filter_size - 1) // 2,
                           groups=1,
                           weight_attr=ParamAttr(name=name + "_weight",
                                                 initializer=Normal(mean=0.,
                                                                    std=0.01),
                                                 learning_rate=1.),
                           bias_attr=False)

        param_attr = ParamAttr(name=norm_name + "_scale",
                               learning_rate=1.,
                               regularizer=L2Decay(0.))
        bias_attr = ParamAttr(name=norm_name + "_offset",
                              learning_rate=1.,
                              regularizer=L2Decay(0.))
        if norm_type in ['bn', 'sync_bn']:
            self.norm = BatchNorm2D(ch_out,
                                    weight_attr=param_attr,
                                    bias_attr=bias_attr)
        elif norm_type == 'gn':
            self.norm = GroupNorm(num_groups=norm_groups,
                                  num_channels=ch_out,
                                  weight_attr=param_attr,
                                  bias_attr=bias_attr)
Example #16
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 groups=1,
                 is_tweaks_mode=False,
                 act=None,
                 name=None):
        super(ConvBNLayer, self).__init__()
        self.is_tweaks_mode = is_tweaks_mode
        # ResNet-D tweak 1/2: add a 2x2 average pooling layer with a stride of 2
        # before the convolution, whose stride is changed to 1; works well in practice.
        self._pool2d_avg = AvgPool2D(kernel_size=2,
                                     stride=2,
                                     padding=0,
                                     ceil_mode=True)

        self._conv = Conv2D(in_channels=in_channels,
                            out_channels=out_channels,
                            kernel_size=kernel_size,
                            stride=stride,
                            padding=(kernel_size - 1) // 2,
                            groups=groups,
                            weight_attr=ParamAttr(name=name + "_weights"),
                            bias_attr=False)
        if name == "conv1":
            bn_name = "bn_" + name
        else:
            bn_name = "bn" + name[3:]

        self._act = act

        self._batch_norm = BatchNorm2D(
            out_channels,
            weight_attr=ParamAttr(name=bn_name + "_scale",
                                  regularizer=L2Decay(0.0)),
            bias_attr=ParamAttr(name=bn_name + "_offset",
                                regularizer=L2Decay(0.0)))
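A sketch of the forward pass the ResNet-D tweak above implies, assuming the usual PaddleClas pattern (forward is not part of the snippet): the 2x2 average pool runs before the convolution whenever is_tweaks_mode is set.

    def forward(self, inputs):
        if self.is_tweaks_mode:
            inputs = self._pool2d_avg(inputs)
        y = self._conv(inputs)
        y = self._batch_norm(y)
        if self._act:
            # e.g. paddle.nn.functional.relu for act='relu'
            y = getattr(paddle.nn.functional, self._act)(y)
        return y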
Example #17
    def _make_layer(self, block, planes, blocks, stride=1):
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                BatchNorm2D(
                    planes * block.expansion,
                    weight_attr=paddle.ParamAttr(
                        initializer=nn.initializer.Constant(value=0.0)),
                    bias_attr=paddle.ParamAttr(
                        regularizer=paddle.regularizer.L1Decay(0.0))),
            )

        layers = []
        layers.append(
            block(self.inplanes, planes, stride, downsample,
                  self.zero_init_residual))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(
                block(self.inplanes, planes,
                      zero_init_residual=self.zero_init_residual))

        return Sequential(*layers)
Example #18
    def __init__(self,
                 num_channels,
                 filter_size,
                 num_filters,
                 stride,
                 num_groups=1):
        super().__init__()

        self.conv = Conv2D(
            in_channels=num_channels,
            out_channels=num_filters,
            kernel_size=filter_size,
            stride=stride,
            padding=(filter_size - 1) // 2,
            groups=num_groups,
            weight_attr=ParamAttr(initializer=KaimingNormal()),
            bias_attr=False)

        self.bn = BatchNorm2D(
            num_filters,
            weight_attr=ParamAttr(regularizer=L2Decay(0.0)),
            bias_attr=ParamAttr(regularizer=L2Decay(0.0)))
        self.hardswish = nn.Hardswish()
Example #19
    def __init__(self, mode="train"):
        ClasModel = paddle.nn.Sequential(
            Conv2D(3, 6, (3, 3)),
            BatchNorm2D(6),
            ReLU(),
            Conv2D(6, 6, (3, 3)),
            BatchNorm2D(6),
            ReLU(),
            MaxPool2D((2, 2)),
            Conv2D(6, 12, (3, 3)),
            BatchNorm2D(12),
            ReLU(),
            Conv2D(12, 12, (3, 3)),
            BatchNorm2D(12),
            ReLU(),
            MaxPool2D((2, 2)),
            Conv2D(12, 8, (3, 3)),
            BatchNorm2D(8),
            ReLU(),
            Conv2D(8, 8, (3, 3)),
            BatchNorm2D(8),
            ReLU(),
            MaxPool2D((2, 2)),
            Flatten(),
            Linear(128, 128),
            ReLU(),
            Linear(128, 32),
            ReLU(),
            Linear(32, 2),
            Softmax(),
        )
        input = InputSpec([None, 3, 64, 64], "float32", "x")
        label = InputSpec([None, 1], "int32", "label")
        model = paddle.Model(ClasModel, inputs=input, labels=label)
        model.prepare(
            paddle.optimizer.Adam(parameters=ClasModel.parameters()),
            paddle.nn.CrossEntropyLoss(),
            paddle.metric.Accuracy(),
        )
        self.model = model

        if mode == "predict":
            self.load_weight()
Example #20
    def __init__(self, input_nc, ndf=64, norm_type='instance'):
        """Construct a DCGAN discriminator

        Parameters:
            input_nc (int)  -- the number of channels in input images
            ndf (int)       -- the number of filters in the last conv layer
            norm_type (str)      -- normalization layer type
        """
        super(DCDiscriminator, self).__init__()
        norm_layer = build_norm_layer(norm_type)
        # no need to use bias, as BatchNorm2D has affine parameters
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.BatchNorm2D
        else:
            use_bias = norm_layer == nn.BatchNorm2D

        kw = 4
        padw = 1

        sequence = [
            nn.Conv2D(input_nc,
                      ndf,
                      kernel_size=kw,
                      stride=2,
                      padding=padw,
                      bias_attr=use_bias),
            nn.LeakyReLU(0.2)
        ]

        nf_mult = 1
        nf_mult_prev = 1
        n_downsampling = 4

        for n in range(1, n_downsampling):  # gradually increase the number of filters
            nf_mult_prev = nf_mult
            nf_mult = min(2**n, 8)
            if norm_type == 'batch':
                sequence += [
                    nn.Conv2D(ndf * nf_mult_prev,
                              ndf * nf_mult,
                              kernel_size=kw,
                              stride=2,
                              padding=padw),
                    BatchNorm2D(ndf * nf_mult),
                    nn.LeakyReLU(0.2)
                ]
            else:
                sequence += [
                    nn.Conv2D(ndf * nf_mult_prev,
                              ndf * nf_mult,
                              kernel_size=kw,
                              stride=2,
                              padding=padw,
                              bias_attr=use_bias),
                    norm_layer(ndf * nf_mult),
                    nn.LeakyReLU(0.2)
                ]

        nf_mult_prev = nf_mult

        sequence += [
            nn.Conv2D(ndf * nf_mult_prev,
                      1,
                      kernel_size=kw,
                      stride=1,
                      padding=0)
        ]  # output 1-channel prediction map

        self.model = nn.Sequential(*sequence)
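A hypothetical usage sketch (forward is not shown and presumably just applies self.model): the four stride-2 convolutions halve the resolution four times, and the stride-1 head with no padding collapses the rest, so a 64x64 input yields a 1x1 single-channel prediction map.

disc = DCDiscriminator(input_nc=3, ndf=64, norm_type='batch')
out = disc.model(paddle.randn([1, 3, 64, 64]))
print(out.shape)  # [1, 1, 1, 1]: 64 -> 32 -> 16 -> 8 -> 4 -> 1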
Example #21
    def __init__(self,
                 input_nz,
                 input_nc,
                 output_nc,
                 ngf=64,
                 norm_type='batch',
                 padding_type='reflect'):
        """Construct a DCGenerator generator

        Args:
            input_nz (int)      -- the number of dimension in input noise
            input_nc (int)      -- the number of channels in input images
            output_nc (int)     -- the number of channels in output images
            ngf (int)           -- the number of filters in the last conv layer
            norm_type (str)     -- the type of normalization layer
            padding_type (str)  -- the name of padding layer in conv layers: reflect | replicate | zero
        """
        super(DCGenerator, self).__init__()

        norm_layer = build_norm_layer(norm_type)
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.BatchNorm2D
        else:
            use_bias = norm_layer == nn.BatchNorm2D

        mult = 8
        n_downsampling = 4

        if norm_type == 'batch':
            model = [
                nn.Conv2DTranspose(input_nz,
                                   ngf * mult,
                                   kernel_size=4,
                                   stride=1,
                                   padding=0,
                                   bias_attr=use_bias),
                BatchNorm2D(ngf * mult),
                nn.ReLU()
            ]
        else:
            model = [
                nn.Conv2DTranspose(input_nz,
                                   ngf * mult,
                                   kernel_size=4,
                                   stride=1,
                                   padding=0,
                                   bias_attr=use_bias),
                norm_layer(ngf * mult),
                nn.ReLU()
            ]

        for i in range(1, n_downsampling):  # add upsampling layers
            mult = 2**(n_downsampling - i)
            output_size = 2**(i + 2)
            if norm_type == 'batch':
                model += [
                    nn.Conv2DTranspose(ngf * mult,
                                       ngf * mult // 2,
                                       kernel_size=4,
                                       stride=2,
                                       padding=1,
                                       bias_attr=use_bias),
                    BatchNorm2D(ngf * mult // 2),
                    nn.ReLU()
                ]
            else:
                model += [
                    nn.Conv2DTranspose(ngf * mult,
                                       int(ngf * mult // 2),
                                       kernel_size=4,
                                       stride=2,
                                       padding=1,
                                       bias_attr=use_bias),
                    norm_layer(int(ngf * mult // 2)),
                    nn.ReLU()
                ]

        output_size = 2**(6)
        model += [
            nn.Conv2DTranspose(ngf,
                               output_nc,
                               kernel_size=4,
                               stride=2,
                               padding=1,
                               bias_attr=use_bias),
            nn.Tanh()
        ]

        self.model = nn.Sequential(*model)
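A shape walk-through of the generator above as a hypothetical usage sketch (forward is not shown and presumably just applies self.model): the first transposed convolution lifts the [N, input_nz, 1, 1] noise to 4x4, each loop iteration doubles the resolution, and the final layer doubles it once more.

gen = DCGenerator(input_nz=100, input_nc=3, output_nc=3, ngf=64)
img = gen.model(paddle.randn([1, 100, 1, 1]))
print(img.shape)  # [1, 3, 64, 64]: 1 -> 4 -> 8 -> 16 -> 32 -> 64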
Example #22
    def __init__(self, in_c, out_c, kernel=(1, 1), stride=(1, 1), padding=(0, 0), groups=1):
        super(ConvBlock, self).__init__()
        self.conv = Conv2D(in_c, out_channels=out_c, kernel_size=kernel, groups=groups, stride=stride, padding=padding)
        self.bn = BatchNorm2D(out_c)
        self.prelu = PReLU(out_c)
Example #23
    def __init__(self):
        super(ModelCase4, self).__init__()
        self.bn1 = BatchNorm2D(3)
        self.ln1 = LayerNorm([3 * 16 * 16])
        self.relu1 = ReLU()
        self.fc1 = paddle.nn.Linear(3 * 16 * 16, 3 * 16 * 16)
Example #24
    def __init__(self):
        super(ModelCase5, self).__init__()
        self.bn1 = BatchNorm2D(255)
Example #25
'''
Example of batchnorm when the input data shape is [N, C, H, W]
'''

import numpy as np
import paddle
from paddle.nn import BatchNorm2D

# Fix the random seed so that every run produces the same result
np.random.seed(100)
# Create the input data
data = np.random.rand(2, 3, 3, 3).astype('float32')
# Use BatchNorm2D to compute the normalized output
# The input shape is [N, C, H, W]; num_features equals C
bn = BatchNorm2D(num_features=3)
x = paddle.to_tensor(data)
y = bn(x)
print('input of BatchNorm2D Layer: \n {}'.format(x.numpy()))
print('output of BatchNorm2D Layer: \n {}'.format(y.numpy()))

# Take channel 0 of data and use numpy to compute
# its mean, std, and normalized output
a = data[:, 0, :, :]
a_mean = a.mean()
a_std = a.std()
b = (a - a_mean) / a_std
print('channel 0 of input data: \n {}'.format(a))
print('mean {}, std {}, \n output: \n {}'.format(a_mean, a_std, b))
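# Extra check (not part of the original example): channel 0 of the layer
# output should agree with the manual numpy result up to BatchNorm2D's
# epsilon (1e-5 by default).
print(np.allclose(y.numpy()[:, 0, :, :], b, atol=1e-3))  # True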
Example #26
        self.data = data
        self.transform = vt.Compose([vt.ToTensor()])

    def __getitem__(self, index):
        data = cv2.imread(osp.join(self.data_path, self.data[index][0]))
        data = self.transform(data)
        label = self.data[index][1]
        return data, label

    def __len__(self):
        return len(self.data)


from paddle.nn import Conv2D, BatchNorm2D, ReLU, Softmax, MaxPool2D, Flatten, Linear

ClasModel = paddle.nn.Sequential(Conv2D(3, 6, (3, 3)), BatchNorm2D(6), ReLU(),
                                 Conv2D(6, 6, (3, 3)), BatchNorm2D(6), ReLU(),
                                 MaxPool2D((2, 2)), Conv2D(6, 12, (3, 3)),
                                 BatchNorm2D(12), ReLU(),
                                 Conv2D(12, 12,
                                        (3, 3)), BatchNorm2D(12), ReLU(),
                                 MaxPool2D((2, 2)), Conv2D(12, 8, (3, 3)),
                                 BatchNorm2D(8), ReLU(), Conv2D(8, 8, (3, 3)),
                                 BatchNorm2D(8), ReLU(), MaxPool2D((2, 2)),
                                 Flatten(), Linear(128, 128), ReLU(),
                                 Linear(128, 32), ReLU(), Linear(32, 2),
                                 Softmax())

train_dataset = HumanClasDataset(mode="train")
eval_dataset = HumanClasDataset(mode="eval")