def __init__(self, num_layers, mode='ir', opts=None):
    super(BackboneEncoderUsingLastLayerIntoWPlus, self).__init__()
    print('Using BackboneEncoderUsingLastLayerIntoWPlus')
    assert num_layers in [50, 100, 152], 'num_layers should be 50, 100, or 152'
    assert mode in ['ir', 'ir_se'], 'mode should be ir or ir_se'
    blocks = get_blocks(num_layers)
    if mode == 'ir':
        unit_module = bottleneck_IR
    elif mode == 'ir_se':
        unit_module = bottleneck_IR_SE
    # Stem: map opts.input_nc input channels to 64 at full resolution.
    self.input_layer = Sequential(
        Conv2D(opts.input_nc, 64, (3, 3), 1, 1, bias_attr=False),
        BatchNorm2D(64), PReLU(64))
    # Pool the final 512-channel feature map to 7x7 and project it to one 512-d vector.
    self.output_layer_2 = Sequential(
        BatchNorm2D(512),
        paddle.nn.AdaptiveAvgPool2D((7, 7)),
        Flatten(),
        Linear(512 * 7 * 7, 512))
    # A single equalized linear layer expands that vector to all 18 W+ styles.
    self.linear = EqualLinear(512, 512 * 18, lr_mul=1)
    modules = []
    for block in blocks:
        for bottleneck in block:
            modules.append(
                unit_module(bottleneck.in_channel, bottleneck.depth,
                            bottleneck.stride))
    self.body = Sequential(*modules)
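
A minimal usage sketch, assuming the full pSp-style class (with its forward) plus get_blocks, bottleneck_IR/bottleneck_IR_SE, and EqualLinear are available from the surrounding module; the SimpleNamespace opts object here is a hypothetical stand-in for the real parsed options:

import paddle
from types import SimpleNamespace

opts = SimpleNamespace(input_nc=3)  # hypothetical stand-in for the options object
encoder = BackboneEncoderUsingLastLayerIntoWPlus(50, mode='ir_se', opts=opts)
x = paddle.randn([1, 3, 256, 256])  # one 256x256 RGB face crop
codes = encoder(x)  # the reference pSp forward reshapes the 512*18 output to [1, 18, 512]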
def __init__(self, in_channel, depth, stride):
    super(bottleneck_IR, self).__init__()
    # Shortcut branch: a (possibly strided) max-pool when channel counts match,
    # otherwise a 1x1 projection conv.
    if in_channel == depth:
        self.shortcut_layer = MaxPool2D(1, stride)
    else:
        self.shortcut_layer = Sequential(
            Conv2D(in_channel, depth, (1, 1), stride, bias_attr=False),
            BatchNorm2D(depth))
    # Residual branch: BN -> 3x3 conv -> PReLU -> strided 3x3 conv -> BN.
    self.res_layer = Sequential(
        BatchNorm2D(in_channel),
        Conv2D(in_channel, depth, (3, 3), (1, 1), 1, bias_attr=False),
        PReLU(depth),
        Conv2D(depth, depth, (3, 3), stride, 1, bias_attr=False),
        BatchNorm2D(depth))
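
A quick shape check, assuming the reference IR-block forward that sums the two branches (res_layer(x) + shortcut_layer(x)):

import paddle

block = bottleneck_IR(in_channel=64, depth=128, stride=2)
y = block(paddle.randn([2, 64, 56, 56]))  # expect [2, 128, 28, 28]: widened channels, halved resolution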
Example #3
    def __init__(self,
                 num_channels,
                 num_filters,
                 stride,
                 shortcut=True,
                 name=None,
                 data_format="NCHW"):
        super(BasicBlock, self).__init__()
        self.stride = stride
        bn_name = "bn_" + name[3:] + "_before"
        self._batch_norm = BatchNorm(
            num_channels,
            act=None,
            epsilon=1e-05,
            param_attr=ParamAttr(name=bn_name + "_scale"),
            bias_attr=ParamAttr(name=bn_name + "_offset"),
            moving_mean_name=bn_name + "_mean",
            moving_variance_name=bn_name + "_variance",
            data_layout=data_format)

        self.conv0 = ConvBNLayer(
            num_channels=num_channels,
            num_filters=num_filters,
            filter_size=3,
            stride=1,
            act=None,
            name=name + "_branch2a",
            data_format=data_format)
        self.prelu = PReLU(num_parameters=1, name=name + "_branch2a_prelu")
        self.conv1 = ConvBNLayer(
            num_channels=num_filters,
            num_filters=num_filters,
            filter_size=3,
            stride=stride,
            act=None,
            name=name + "_branch2b",
            data_format=data_format)

        if shortcut:
            # 1x1 projection shortcut used by the first (downsampling) unit of a stage.
            self.short = ConvBNLayer(
                num_channels=num_channels,
                num_filters=num_filters,
                filter_size=1,
                stride=stride,
                act=None,
                name=name + "_branch1",
                data_format=data_format)

        self.shortcut = shortcut
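
A usage sketch, assuming ConvBNLayer is defined as in the surrounding module (with "same" padding) and that the forward applies _batch_norm, conv0, prelu, and conv1, then adds the shortcut branch, as in ArcFace-style IR blocks:

import paddle

block = BasicBlock(num_channels=64, num_filters=128, stride=2,
                   shortcut=True, name="res3a")  # the "res" prefix is stripped for BN parameter names
y = block(paddle.randn([2, 64, 56, 56]))  # expect [2, 128, 28, 28]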
Example #4
    def __init__(self, num_classes=10):
        super(ImperativeLenet, self).__init__()
        conv2d_w1_attr = fluid.ParamAttr(name="conv2d_w_1")
        conv2d_w2_attr = fluid.ParamAttr(name="conv2d_w_2")
        fc_w1_attr = fluid.ParamAttr(name="fc_w_1")
        fc_w2_attr = fluid.ParamAttr(name="fc_w_2")
        fc_w3_attr = fluid.ParamAttr(name="fc_w_3")
        conv2d_b2_attr = fluid.ParamAttr(name="conv2d_b_2")
        fc_b1_attr = fluid.ParamAttr(name="fc_b_1")
        fc_b2_attr = fluid.ParamAttr(name="fc_b_2")
        fc_b3_attr = fluid.ParamAttr(name="fc_b_3")
        self.features = Sequential(
            Conv2D(in_channels=1,
                   out_channels=6,
                   kernel_size=3,
                   stride=1,
                   padding=1,
                   weight_attr=conv2d_w1_attr,
                   bias_attr=False), BatchNorm2D(6), ReLU(),
            MaxPool2D(kernel_size=2, stride=2),
            Conv2D(in_channels=6,
                   out_channels=16,
                   kernel_size=5,
                   stride=1,
                   padding=0,
                   weight_attr=conv2d_w2_attr,
                   bias_attr=conv2d_b2_attr), BatchNorm2D(16), PReLU(),
            MaxPool2D(kernel_size=2, stride=2))

        self.fc = Sequential(
            Linear(in_features=400,
                   out_features=120,
                   weight_attr=fc_w1_attr,
                   bias_attr=fc_b1_attr), LeakyReLU(),
            Linear(in_features=120,
                   out_features=84,
                   weight_attr=fc_w2_attr,
                   bias_attr=fc_b2_attr), Sigmoid(),
            Linear(in_features=84,
                   out_features=num_classes,
                   weight_attr=fc_w3_attr,
                   bias_attr=fc_b3_attr), Softmax())
        # Functional add wrapper and quant stub so quantization-aware training
        # (e.g. PaddleSlim) can observe these ops.
        self.add = paddle.nn.quant.add()
        self.quant_stub = paddle.nn.quant.QuantStub()
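
A forward-pass sketch, assuming the usual LeNet forward that flattens between self.features and self.fc (400 = 16 channels x 5 x 5 after two pooling stages on a 28x28 input):

import paddle

model = ImperativeLenet(num_classes=10)
x = paddle.randn([4, 1, 28, 28])  # MNIST-sized batch
probs = model(x)  # expect [4, 10]; the final Softmax makes each row sum to 1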
    def __init__(self, num_layers, mode='ir', opts=None):
        super(GradualStyleEncoder, self).__init__()
        assert num_layers in [50, 100, 152], 'num_layers should be 50, 100, or 152'
        assert mode in ['ir', 'ir_se'], 'mode should be ir or ir_se'
        blocks = get_blocks(num_layers)
        if mode == 'ir':
            unit_module = bottleneck_IR
        elif mode == 'ir_se':
            unit_module = bottleneck_IR_SE
        self.input_layer = Sequential(
            Conv2D(opts.input_nc, 64, (3, 3), 1, 1, bias_attr=False),
            BatchNorm2D(64), PReLU(64))
        modules = []
        for block in blocks:
            for bottleneck in block:
                modules.append(
                    unit_module(bottleneck.in_channel, bottleneck.depth,
                                bottleneck.stride))
        self.body = Sequential(*modules)

        # 18 W+ style vectors: indices [0, 3) are coarse, [3, 7) middle, the
        # rest fine, each group read from a different feature-map scale.
        self.styles = nn.LayerList()
        self.style_count = 18
        self.coarse_ind = 3
        self.middle_ind = 7
        for i in range(self.style_count):
            if i < self.coarse_ind:
                style = GradualStyleBlock(512, 512, 16)
            elif i < self.middle_ind:
                style = GradualStyleBlock(512, 512, 32)
            else:
                style = GradualStyleBlock(512, 512, 64)
            self.styles.append(style)
        # Lateral 1x1 convs that project mid-level features to 512 channels for
        # the FPN-style top-down merge.
        self.latlayer1 = nn.Conv2D(256, 512, kernel_size=1, stride=1, padding=0)
        self.latlayer2 = nn.Conv2D(128, 512, kernel_size=1, stride=1, padding=0)
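
A usage sketch, assuming the reference pSp forward, which taps the body at three depths, merges them top-down through latlayer1/latlayer2, and stacks the 18 style outputs; opts is again a hypothetical stand-in:

import paddle
from types import SimpleNamespace

encoder = GradualStyleEncoder(50, mode='ir', opts=SimpleNamespace(input_nc=3))
w_plus = encoder(paddle.randn([1, 3, 256, 256]))  # reference forward returns [1, 18, 512]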
def __init__(self, in_c, out_c, kernel=(1, 1), stride=(1, 1), padding=(0, 0), groups=1):
    super(ConvBlock, self).__init__()
    # Conv -> BN -> PReLU, the usual MobileFaceNet-style conv block.
    self.conv = Conv2D(in_c, out_channels=out_c, kernel_size=kernel,
                       groups=groups, stride=stride, padding=padding)
    self.bn = BatchNorm2D(out_c)
    self.prelu = PReLU(out_c)
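
Because the layers are plain attributes, a shape check can chain them directly even without the class's forward:

import paddle

blk = ConvBlock(64, 128, kernel=(3, 3), stride=(2, 2), padding=(1, 1))
y = blk.prelu(blk.bn(blk.conv(paddle.randn([2, 64, 56, 56]))))  # [2, 128, 28, 28]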
Example #7
    def __init__(self,
                 layers=50,
                 num_features=512,
                 fc_type='E',
                 dropout=0.4,
                 input_image_channel=3,
                 input_image_width=112,
                 input_image_height=112,
                 data_format="NCHW"):

        super(FresResNet, self).__init__()

        self.layers = layers
        self.data_format = data_format
        self.input_image_channel = input_image_channel

        supported_layers = [50, 100]
        assert layers in supported_layers, \
            "supported layers are {} but input layer is {}".format(
                supported_layers, layers)

        if layers == 50:
            units = [3, 4, 14, 3]
        elif layers == 100:
            units = [3, 13, 30, 3]

        num_channels = [64, 64, 128, 256]
        num_filters = [64, 128, 256, 512]

        self.conv = ConvBNLayer(
            num_channels=self.input_image_channel,
            num_filters=64,
            filter_size=3,
            stride=1,
            act=None,
            name="conv1",
            data_format=self.data_format)
        self.prelu = PReLU(num_parameters=1, name="prelu1")

        self.block_list = paddle.nn.LayerList()
        for block in range(len(units)):
            shortcut = True
            for i in range(units[block]):
                conv_name = "res" + str(block + 2) + chr(97 + i)
                basic_block = self.add_sublayer(
                    conv_name,
                    BasicBlock(
                        num_channels=num_channels[block]
                        if i == 0 else num_filters[block],
                        num_filters=num_filters[block],
                        # The first unit of each stage downsamples (stride 2)
                        # and takes the projection shortcut.
                        stride=2 if shortcut else 1,
                        shortcut=shortcut,
                        name=conv_name,
                        data_format=self.data_format))
                self.block_list.append(basic_block)
                shortcut = False

        # Four stride-2 stages shrink the spatial dims by 16x overall.
        assert input_image_width % 16 == 0
        assert input_image_height % 16 == 0
        feat_w = input_image_width // 16
        feat_h = input_image_height // 16
        self.fc_channels = num_filters[-1] * feat_w * feat_h
        self.fc = FC(num_filters[-1],
                     self.fc_channels,
                     num_features,
                     fc_type,
                     dropout,
                     name='fc')
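
A usage sketch, assuming ConvBNLayer, BasicBlock, and FC are defined as in the surrounding module and the forward chains conv, prelu, the block list, and the FC head:

import paddle

model = FresResNet(layers=50, num_features=512, fc_type='E', dropout=0.4)
emb = model(paddle.randn([2, 3, 112, 112]))  # expect a [2, 512] face embedding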