Code Example #1
File: iresnet.py Project: bilylee/DCQ
    def __init__(self, input_size, num_layers, out_dim, mode='ir'):
        super(Backbone, self).__init__()
        assert input_size[0] in [
            112, 224
        ], "input_size should be [112, 112] or [224, 224]"
        assert num_layers in [34, 50, 100, 152], \
            "num_layers should be 34, 50, 100 or 152"
        assert mode in ['ir', 'ir_se'], "mode should be ir or ir_se"
        blocks = get_blocks(num_layers)
        if mode == 'ir':
            unit_module = BottleneckIR
        elif mode == 'ir_se':
            unit_module = BottleneckIRSE
        self.input_layer = Sequential(
            Conv2D(3, 64, (3, 3), 1, 1, bias_attr=False), BatchNorm2D(64),
            PReLU(64))
        if input_size[0] == 112:
            self.output_layer = Sequential(BatchNorm2D(512), Dropout(),
                                           Flatten(),
                                           Linear(512 * 7 * 7, out_dim),
                                           BatchNorm1D(out_dim))
        else:
            self.output_layer = Sequential(BatchNorm2D(512), Dropout(),
                                           Flatten(),
                                           Linear(512 * 14 * 14, out_dim),
                                           BatchNorm1D(out_dim))

        modules = []
        for block in blocks:
            for bottleneck in block:
                modules.append(
                    unit_module(bottleneck.in_channel, bottleneck.depth,
                                bottleneck.stride))
        self.body = Sequential(*modules)
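
The two classifier heads simply reflect the backbone's overall stride of 16: a 112x112 input leaves a 512x7x7 feature map (hence Linear(512 * 7 * 7, out_dim)), while a 224x224 input leaves 512x14x14.
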
Code Example #2
    def __init__(self, class_num=1000):
        super(AlexNetDY, self).__init__()

        stdv = 1.0 / math.sqrt(3 * 11 * 11)
        self._conv1 = ConvPoolLayer(
            3, 64, 11, 4, 2, stdv, act="relu", name="conv1")
        stdv = 1.0 / math.sqrt(64 * 5 * 5)
        self._conv2 = ConvPoolLayer(
            64, 192, 5, 1, 2, stdv, act="relu", name="conv2")
        stdv = 1.0 / math.sqrt(192 * 3 * 3)
        self._conv3 = Conv2D(
            192,
            384,
            3,
            stride=1,
            padding=1,
            weight_attr=ParamAttr(
                name="conv3_weights", initializer=Uniform(-stdv, stdv)),
            bias_attr=ParamAttr(
                name="conv3_offset", initializer=Uniform(-stdv, stdv)))
        stdv = 1.0 / math.sqrt(384 * 3 * 3)
        self._conv4 = Conv2D(
            384,
            256,
            3,
            stride=1,
            padding=1,
            weight_attr=ParamAttr(
                name="conv4_weights", initializer=Uniform(-stdv, stdv)),
            bias_attr=ParamAttr(
                name="conv4_offset", initializer=Uniform(-stdv, stdv)))
        stdv = 1.0 / math.sqrt(256 * 3 * 3)
        self._conv5 = ConvPoolLayer(
            256, 256, 3, 1, 1, stdv, act="relu", name="conv5")
        stdv = 1.0 / math.sqrt(256 * 6 * 6)

        self._drop1 = Dropout(p=0.5, mode="downscale_in_infer")
        self._fc6 = Linear(
            in_features=256 * 6 * 6,
            out_features=4096,
            weight_attr=ParamAttr(
                name="fc6_weights", initializer=Uniform(-stdv, stdv)),
            bias_attr=ParamAttr(
                name="fc6_offset", initializer=Uniform(-stdv, stdv)))

        self._drop2 = Dropout(p=0.5, mode="downscale_in_infer")
        self._fc7 = Linear(
            in_features=4096,
            out_features=4096,
            weight_attr=ParamAttr(
                name="fc7_weights", initializer=Uniform(-stdv, stdv)),
            bias_attr=ParamAttr(
                name="fc7_offset", initializer=Uniform(-stdv, stdv)))
        self._fc8 = Linear(
            in_features=4096,
            out_features=class_num,
            weight_attr=ParamAttr(
                name="fc8_weights", initializer=Uniform(-stdv, stdv)),
            bias_attr=ParamAttr(
                name="fc8_offset", initializer=Uniform(-stdv, stdv)))
Code Example #3
File: se_resnext.py Project: lvjian0706/PaddleClas
    def __init__(self,
                 num_channels,
                 num_filters,
                 reduction_ratio,
                 name=None,
                 data_format="NCHW"):
        super(SELayer, self).__init__()

        self.data_format = data_format
        self.pool2d_gap = AdaptiveAvgPool2D(1, data_format=self.data_format)

        self._num_channels = num_channels

        med_ch = int(num_channels / reduction_ratio)
        stdv = 1.0 / math.sqrt(num_channels * 1.0)
        self.squeeze = Linear(num_channels,
                              med_ch,
                              weight_attr=ParamAttr(
                                  initializer=Uniform(-stdv, stdv),
                                  name=name + "_sqz_weights"),
                              bias_attr=ParamAttr(name=name + '_sqz_offset'))
        self.relu = nn.ReLU()
        stdv = 1.0 / math.sqrt(med_ch * 1.0)
        self.excitation = Linear(
            med_ch,
            num_filters,
            weight_attr=ParamAttr(initializer=Uniform(-stdv, stdv),
                                  name=name + "_exc_weights"),
            bias_attr=ParamAttr(name=name + '_exc_offset'))
        self.sigmoid = nn.Sigmoid()
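
The snippet stops at __init__, so the forward pass is not shown. Under the standard squeeze-and-excitation pattern, and assuming NCHW data (with paddle imported as usual), a hypothetical forward for this layer would look roughly like:

    def forward(self, x):
        # Hypothetical sketch; not part of the source snippet.
        pool = self.pool2d_gap(x)                 # [N, C, 1, 1]
        pool = paddle.squeeze(pool, axis=[2, 3])  # [N, C]
        squeeze = self.relu(self.squeeze(pool))   # [N, C // reduction_ratio]
        excitation = self.sigmoid(self.excitation(squeeze))  # [N, num_filters]
        excitation = paddle.unsqueeze(excitation, axis=[2, 3])
        return x * excitation                     # channel-wise rescaling
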
Code Example #4
    def __init__(self, layers=11, class_dim=1000):
        super(VGGNet, self).__init__()

        self.layers = layers
        self.vgg_configure = {
            11: [1, 1, 2, 2, 2],
            13: [2, 2, 2, 2, 2],
            16: [2, 2, 3, 3, 3],
            19: [2, 2, 4, 4, 4]
        }
        assert self.layers in self.vgg_configure.keys(), \
            "supported layers are {} but input layer is {}".format(
                self.vgg_configure.keys(), layers)
        self.groups = self.vgg_configure[self.layers]

        self._conv_block_1 = ConvBlock(3, 64, self.groups[0], name="conv1_")
        self._conv_block_2 = ConvBlock(64, 128, self.groups[1], name="conv2_")
        self._conv_block_3 = ConvBlock(128, 256, self.groups[2], name="conv3_")
        self._conv_block_4 = ConvBlock(256, 512, self.groups[3], name="conv4_")
        self._conv_block_5 = ConvBlock(512, 512, self.groups[4], name="conv5_")

        self._drop = Dropout(p=0.5, mode="downscale_in_infer")
        self._fc1 = Linear(7 * 7 * 512,
                           4096,
                           weight_attr=ParamAttr(name="fc6_weights"),
                           bias_attr=ParamAttr(name="fc6_offset"))
        self._fc2 = Linear(4096,
                           4096,
                           weight_attr=ParamAttr(name="fc7_weights"),
                           bias_attr=ParamAttr(name="fc7_offset"))
        self._out = Linear(4096,
                           class_dim,
                           weight_attr=ParamAttr(name="fc8_weights"),
                           bias_attr=ParamAttr(name="fc8_offset"))
Code Example #5
File: vgg.py Project: winter-wang/PaddleClas
    def __init__(self,
                 config,
                 stop_grad_layers=0,
                 class_num=1000,
                 return_patterns=None):
        super().__init__()

        self.stop_grad_layers = stop_grad_layers

        self.conv_block_1 = ConvBlock(3, 64, config[0])
        self.conv_block_2 = ConvBlock(64, 128, config[1])
        self.conv_block_3 = ConvBlock(128, 256, config[2])
        self.conv_block_4 = ConvBlock(256, 512, config[3])
        self.conv_block_5 = ConvBlock(512, 512, config[4])

        self.relu = nn.ReLU()
        self.flatten = nn.Flatten(start_axis=1, stop_axis=-1)

        for idx, block in enumerate([
                self.conv_block_1, self.conv_block_2, self.conv_block_3,
                self.conv_block_4, self.conv_block_5
        ]):
            if self.stop_grad_layers >= idx + 1:
                for param in block.parameters():
                    param.trainable = False

        self.drop = Dropout(p=0.5, mode="downscale_in_infer")
        self.fc1 = Linear(7 * 7 * 512, 4096)
        self.fc2 = Linear(4096, 4096)
        self.fc3 = Linear(4096, class_num)
        if return_patterns is not None:
            self.update_res(return_patterns)
            self.register_forward_post_hook(self._return_dict_hook)
Code Example #6
File: Heared.py Project: zl89298832/Deep_Learning
    def __init__(self):
        # Initialize parameters in the parent class
        super(Regressor, self).__init__()

        # First fully connected layer: input dim 13, output dim 10
        self.fc1 = Linear(in_features=13, out_features=10)
        self.fc2 = Linear(in_features=10, out_features=10)
        # Fully connected output layer: output dim 1
        self.fc3 = Linear(in_features=10, out_features=1)
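
Only __init__ appears above, so here is a minimal self-contained version of the same three-layer regressor with an assumed ReLU forward pass (the activation choice is not shown in the source):

import paddle
import paddle.nn.functional as F
from paddle.nn import Layer, Linear

class Regressor(Layer):
    def __init__(self):
        super(Regressor, self).__init__()
        self.fc1 = Linear(in_features=13, out_features=10)
        self.fc2 = Linear(in_features=10, out_features=10)
        self.fc3 = Linear(in_features=10, out_features=1)

    def forward(self, x):
        x = F.relu(self.fc1(x))      # assumed hidden activation
        x = F.relu(self.fc2(x))
        return self.fc3(x)           # single regression output

model = Regressor()
pred = model(paddle.randn([4, 13]))  # batch of 4 samples, 13 features each
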
Code Example #7
    def __init__(self, num_classes=10):
        super(ImperativeLenetWithSkipQuant, self).__init__()

        conv2d_w1_attr = fluid.ParamAttr(name="conv2d_w_1")
        conv2d_w2_attr = fluid.ParamAttr(name="conv2d_w_2")
        fc_w1_attr = fluid.ParamAttr(name="fc_w_1")
        fc_w2_attr = fluid.ParamAttr(name="fc_w_2")
        fc_w3_attr = fluid.ParamAttr(name="fc_w_3")
        conv2d_b1_attr = fluid.ParamAttr(name="conv2d_b_1")
        conv2d_b2_attr = fluid.ParamAttr(name="conv2d_b_2")
        fc_b1_attr = fluid.ParamAttr(name="fc_b_1")
        fc_b2_attr = fluid.ParamAttr(name="fc_b_2")
        fc_b3_attr = fluid.ParamAttr(name="fc_b_3")
        self.conv2d_0 = Conv2D(in_channels=1,
                               out_channels=6,
                               kernel_size=3,
                               stride=1,
                               padding=1,
                               weight_attr=conv2d_w1_attr,
                               bias_attr=conv2d_b1_attr)
        self.conv2d_0.skip_quant = True

        self.batch_norm_0 = BatchNorm2D(6)
        self.relu_0 = ReLU()
        self.pool2d_0 = MaxPool2D(kernel_size=2, stride=2)
        self.conv2d_1 = Conv2D(in_channels=6,
                               out_channels=16,
                               kernel_size=5,
                               stride=1,
                               padding=0,
                               weight_attr=conv2d_w2_attr,
                               bias_attr=conv2d_b2_attr)
        self.conv2d_1.skip_quant = False

        self.batch_norm_1 = BatchNorm2D(16)
        self.relu6_0 = ReLU6()
        self.pool2d_1 = MaxPool2D(kernel_size=2, stride=2)
        self.linear_0 = Linear(in_features=400,
                               out_features=120,
                               weight_attr=fc_w1_attr,
                               bias_attr=fc_b1_attr)
        self.linear_0.skip_quant = True

        self.leaky_relu_0 = LeakyReLU()
        self.linear_1 = Linear(in_features=120,
                               out_features=84,
                               weight_attr=fc_w2_attr,
                               bias_attr=fc_b2_attr)
        self.linear_1.skip_quant = False

        self.sigmoid_0 = Sigmoid()
        self.linear_2 = Linear(in_features=84,
                               out_features=num_classes,
                               weight_attr=fc_w3_attr,
                               bias_attr=fc_b3_attr)
        self.linear_2.skip_quant = False
        self.softmax_0 = Softmax()
Code Example #8
    def __init__(self, num_classes=10):
        super(ImperativeLenet, self).__init__()
        conv2d_w1_attr = fluid.ParamAttr(name="conv2d_w_1")
        conv2d_w2_attr = fluid.ParamAttr(name="conv2d_w_2")
        fc_w1_attr = fluid.ParamAttr(name="fc_w_1")
        fc_w2_attr = fluid.ParamAttr(name="fc_w_2")
        fc_w3_attr = fluid.ParamAttr(name="fc_w_3")
        conv2d_b2_attr = fluid.ParamAttr(name="conv2d_b_2")
        fc_b1_attr = fluid.ParamAttr(name="fc_b_1")
        fc_b2_attr = fluid.ParamAttr(name="fc_b_2")
        fc_b3_attr = fluid.ParamAttr(name="fc_b_3")
        self.features = Sequential(
            Conv2D(
                in_channels=1,
                out_channels=6,
                kernel_size=3,
                stride=1,
                padding=1,
                weight_attr=conv2d_w1_attr,
                bias_attr=False),
            BatchNorm2D(6),
            ReLU(),
            MaxPool2D(
                kernel_size=2, stride=2),
            Conv2D(
                in_channels=6,
                out_channels=16,
                kernel_size=5,
                stride=1,
                padding=0,
                weight_attr=conv2d_w2_attr,
                bias_attr=conv2d_b2_attr),
            BatchNorm2D(16),
            PReLU(),
            MaxPool2D(
                kernel_size=2, stride=2))

        self.fc = Sequential(
            Linear(
                in_features=400,
                out_features=120,
                weight_attr=fc_w1_attr,
                bias_attr=fc_b1_attr),
            LeakyReLU(),
            Linear(
                in_features=120,
                out_features=84,
                weight_attr=fc_w2_attr,
                bias_attr=fc_b2_attr),
            Sigmoid(),
            Linear(
                in_features=84,
                out_features=num_classes,
                weight_attr=fc_w3_attr,
                bias_attr=fc_b3_attr),
            Softmax())
Code Example #9
File: modeling.py Project: northfun/PaddleNLP
    def __init__(self,
                 d_model,
                 nhead,
                 dim_feedforward,
                 dropout=0.1,
                 activation="relu",
                 attn_dropout=None,
                 act_dropout=None,
                 normalize_before=False,
                 weight_attr=None,
                 bias_attr=None,
                 attention_type="bigbird",
                 block_size=1,
                 window_size=3,
                 num_global_blocks=1,
                 num_rand_blocks=1,
                 seed=None):
        self._config = locals()
        self._config.pop("self")
        self._config.pop("__class__", None)  # py3

        super(TransformerEncoderLayer, self).__init__()
        attn_dropout = dropout if attn_dropout is None else attn_dropout
        act_dropout = dropout if act_dropout is None else act_dropout
        self.normalize_before = normalize_before

        weight_attrs = _convert_param_attr_to_list(weight_attr, 2)
        bias_attrs = _convert_param_attr_to_list(bias_attr, 2)

        self.self_attn = MultiHeadAttention(
            d_model,
            nhead,
            dropout=attn_dropout,
            weight_attr=weight_attrs[0],
            bias_attr=bias_attrs[0],
            attention_type=attention_type,
            block_size=block_size,
            window_size=window_size,
            num_global_blocks=num_global_blocks,
            num_rand_blocks=num_rand_blocks,
            seed=seed)
        self.linear1 = Linear(d_model,
                              dim_feedforward,
                              weight_attrs[1],
                              bias_attr=bias_attrs[1])
        self.dropout = Dropout(act_dropout, mode="upscale_in_train")
        self.linear2 = Linear(dim_feedforward,
                              d_model,
                              weight_attrs[1],
                              bias_attr=bias_attrs[1])
        self.norm1 = LayerNorm(d_model, epsilon=1e-12)
        self.norm2 = LayerNorm(d_model, epsilon=1e-12)
        self.dropout1 = Dropout(dropout, mode="upscale_in_train")
        self.dropout2 = Dropout(dropout, mode="upscale_in_train")
        self.activation = getattr(F, activation)
        self.d_model = d_model
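
Two details worth noting here: the locals() call at the top snapshots every constructor argument into self._config (after popping self and __class__), presumably so the layer's configuration can be recovered later for re-instantiation or serialization; and attn_dropout / act_dropout fall back to the main dropout rate when left as None.
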
Code Example #10
File: test_model.py Project: sandyhouse/Paddle
    def __init__(self, num_classes=10):
        super(LeNetDygraph, self).__init__()
        self.num_classes = num_classes
        self.features = Sequential(Conv2D(1, 6, 3, stride=1,
                                          padding=1), ReLU(),
                                   paddle.fluid.dygraph.Pool2D(2, 'max', 2),
                                   Conv2D(6, 16, 5, stride=1, padding=0),
                                   ReLU(),
                                   paddle.fluid.dygraph.Pool2D(2, 'max', 2))

        if num_classes > 0:
            self.fc = Sequential(Linear(400, 120), Linear(120, 84),
                                 Linear(84, 10))
Code Example #11
File: lenet.py Project: goodcoder-cnn/Paddle
    def __init__(self, num_classes=10, classifier_activation='softmax'):
        super(LeNet, self).__init__()
        self.num_classes = num_classes
        self.features = Sequential(Conv2d(1, 6, 3, stride=1,
                                          padding=1), ReLU(),
                                   Pool2D(2, 'max', 2),
                                   Conv2d(6, 16, 5, stride=1, padding=0),
                                   ReLU(), Pool2D(2, 'max', 2))

        if num_classes > 0:
            self.fc = Sequential(Linear(400, 120), Linear(120, 84),
                                 Linear(84, 10),
                                 Softmax())  # TODO: accept any activation
Code Example #12
    def __init__(self, num_classes=10):
        super(ImperativeLenet, self).__init__()
        conv2d_w1_attr = fluid.ParamAttr(name="conv2d_w_1")
        conv2d_w2_attr = fluid.ParamAttr(name="conv2d_w_2")
        fc_w1_attr = fluid.ParamAttr(name="fc_w_1")
        fc_w2_attr = fluid.ParamAttr(name="fc_w_2")
        fc_w3_attr = fluid.ParamAttr(name="fc_w_3")
        conv2d_b1_attr = fluid.ParamAttr(name="conv2d_b_1")
        conv2d_b2_attr = fluid.ParamAttr(name="conv2d_b_2")
        fc_b1_attr = fluid.ParamAttr(name="fc_b_1")
        fc_b2_attr = fluid.ParamAttr(name="fc_b_2")
        fc_b3_attr = fluid.ParamAttr(name="fc_b_3")
        self.features = Sequential(
            Conv2D(
                in_channels=1,
                out_channels=6,
                kernel_size=3,
                stride=1,
                padding=1,
                weight_attr=conv2d_w1_attr,
                bias_attr=conv2d_b1_attr),
            Pool2D(
                pool_size=2, pool_type='max', pool_stride=2),
            Conv2D(
                in_channels=6,
                out_channels=16,
                kernel_size=5,
                stride=1,
                padding=0,
                weight_attr=conv2d_w2_attr,
                bias_attr=conv2d_b2_attr),
            Pool2D(
                pool_size=2, pool_type='max', pool_stride=2))

        self.fc = Sequential(
            Linear(
                in_features=400,
                out_features=120,
                weight_attr=fc_w1_attr,
                bias_attr=fc_b1_attr),
            Linear(
                in_features=120,
                out_features=84,
                weight_attr=fc_w2_attr,
                bias_attr=fc_b2_attr),
            Linear(
                in_features=84,
                out_features=num_classes,
                weight_attr=fc_w3_attr,
                bias_attr=fc_b3_attr),
            Softmax())
Code Example #13
    def __init__(self):
        super(ImperativeLinearBn_hook, self).__init__()

        fc_w_attr = paddle.ParamAttr(
            name="linear_weight",
            initializer=paddle.nn.initializer.Constant(value=0.5))

        self.linear = Linear(in_features=10,
                             out_features=10,
                             weight_attr=fc_w_attr)
        self.bn = BatchNorm1D(10)

        forward_pre = self.linear.register_forward_pre_hook(pre_hook)
        forward_post = self.bn.register_forward_post_hook(post_hook)
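
pre_hook and post_hook are referenced but not defined in this snippet. Hypothetical definitions matching Paddle's hook signatures, hook(layer, input) for pre-hooks and hook(layer, input, output) for post-hooks, might look like:

def pre_hook(layer, input):
    # Runs before Linear.forward; returning a value replaces the input tuple.
    print("pre:", layer.full_name())
    return input

def post_hook(layer, input, output):
    # Runs after BatchNorm1D.forward; returning a value replaces the output.
    print("post:", output.shape)
    return output
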
Code Example #14
File: LeNet_PALM.py Project: zzLoschicos/tutorials
    def __init__(self, num_classes=1):
        super(LeNet_PALM, self).__init__()

        # Build the conv/pool blocks; each conv layer uses a Sigmoid activation followed by 2x2 pooling
        self.conv1 = Conv2D(in_channels=3, out_channels=6, kernel_size=5)
        self.max_pool1 = MaxPool2D(kernel_size=2, stride=2)
        self.conv2 = Conv2D(in_channels=6, out_channels=16, kernel_size=5)
        self.max_pool2 = MaxPool2D(kernel_size=2, stride=2)
        # Create the third conv layer
        self.conv3 = Conv2D(in_channels=16, out_channels=120, kernel_size=4)
        # Fully connected layers: the first FC layer outputs 64 neurons
        self.fc1 = Linear(in_features=300000, out_features=64)
        # The second FC layer outputs one neuron per class label
        self.fc2 = Linear(in_features=64, out_features=num_classes)
Code Example #15
File: test_model.py Project: sandyhouse/Paddle
    def __init__(self, num_classes=10):
        super(LeNetListInput, self).__init__()
        self.num_classes = num_classes
        self.cov = Conv2D(1, 6, 3, stride=1, padding=1)
        for param in self.cov.parameters():
            param.trainable = False
        self.features = Sequential(self.cov, ReLU(),
                                   paddle.fluid.dygraph.Pool2D(2, 'max', 2),
                                   Conv2D(6, 16, 5, stride=1, padding=0),
                                   ReLU(),
                                   paddle.fluid.dygraph.Pool2D(2, 'max', 2))

        if num_classes > 0:
            self.fc = Sequential(Linear(400, 120), Linear(120, 84),
                                 Linear(84, 10))
Code Example #16
    def __init__(self, num_classes=1000):
        super(AlexNet, self).__init__()
        self.num_classes = num_classes
        stdv = 1.0 / math.sqrt(3 * 11 * 11)
        self._conv1 = ConvPoolLayer(3, 64, 11, 4, 2, stdv, act="relu")
        stdv = 1.0 / math.sqrt(64 * 5 * 5)
        self._conv2 = ConvPoolLayer(64, 192, 5, 1, 2, stdv, act="relu")
        stdv = 1.0 / math.sqrt(192 * 3 * 3)
        self._conv3 = Conv2D(
            192,
            384,
            3,
            stride=1,
            padding=1,
            weight_attr=ParamAttr(initializer=Uniform(-stdv, stdv)),
            bias_attr=ParamAttr(initializer=Uniform(-stdv, stdv)))
        stdv = 1.0 / math.sqrt(384 * 3 * 3)
        self._conv4 = Conv2D(
            384,
            256,
            3,
            stride=1,
            padding=1,
            weight_attr=ParamAttr(initializer=Uniform(-stdv, stdv)),
            bias_attr=ParamAttr(initializer=Uniform(-stdv, stdv)))
        stdv = 1.0 / math.sqrt(256 * 3 * 3)
        self._conv5 = ConvPoolLayer(256, 256, 3, 1, 1, stdv, act="relu")

        if self.num_classes > 0:
            stdv = 1.0 / math.sqrt(256 * 6 * 6)
            self._drop1 = Dropout(p=0.5, mode="downscale_in_infer")
            self._fc6 = Linear(
                in_features=256 * 6 * 6,
                out_features=4096,
                weight_attr=ParamAttr(initializer=Uniform(-stdv, stdv)),
                bias_attr=ParamAttr(initializer=Uniform(-stdv, stdv)))

            self._drop2 = Dropout(p=0.5, mode="downscale_in_infer")
            self._fc7 = Linear(
                in_features=4096,
                out_features=4096,
                weight_attr=ParamAttr(initializer=Uniform(-stdv, stdv)),
                bias_attr=ParamAttr(initializer=Uniform(-stdv, stdv)))
            self._fc8 = Linear(
                in_features=4096,
                out_features=num_classes,
                weight_attr=ParamAttr(initializer=Uniform(-stdv, stdv)),
                bias_attr=ParamAttr(initializer=Uniform(-stdv, stdv)))
Code Example #17
    def __init__(self, input_size, block, layers, zero_init_residual=True):
        super(ResNet, self).__init__()
        assert input_size[0] in [
            112, 224
        ], "input_size should be [112, 112] or [224, 224]"
        self.inplanes = 64
        self.zero_init_residual = zero_init_residual
        self.conv1 = Conv2D(3,
                            64,
                            kernel_size=7,
                            stride=2,
                            padding=3,
                            weight_attr=paddle.ParamAttr(
                                initializer=nn.initializer.KaimingNormal()))
        self.bn1 = BatchNorm2D(
            64,
            weight_attr=paddle.ParamAttr(
                initializer=nn.initializer.Constant(value=1),
                regularizer=paddle.regularizer.L1Decay(0.0)),
            bias_attr=paddle.ParamAttr(
                initializer=nn.initializer.Constant(value=0),
                regularizer=paddle.regularizer.L1Decay(0.0)))
        self.relu = ReLU()
        self.maxpool = MaxPool2D(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)

        self.bn_o1 = BatchNorm2D(
            2048,
            weight_attr=paddle.ParamAttr(
                initializer=nn.initializer.Constant(value=0.0),
                regularizer=paddle.regularizer.L1Decay(0.0)),
            bias_attr=paddle.ParamAttr(
                regularizer=paddle.regularizer.L1Decay(0.0)))
        self.dropout = Dropout()
        if input_size[0] == 112:
            self.fc = Linear(2048 * 4 * 4, 512)
        else:
            self.fc = Linear(2048 * 8 * 8, 512)
        self.bn_o2 = BatchNorm1D(
            512,
            weight_attr=paddle.ParamAttr(
                initializer=nn.initializer.Constant(value=0.0),
                regularizer=paddle.regularizer.L1Decay(0.0)),
            bias_attr=paddle.ParamAttr(
                regularizer=paddle.regularizer.L1Decay(0.0)))
Code Example #18
File: SimpleNet.py Project: zzLoschicos/tutorials
 def __init__(self, num_classes=1):
     super(SimpleNet, self).__init__()
     self.conv1 = Conv2D(in_channels=3,
                         out_channels=6,
                         kernel_size=5,
                         stride=1,
                         padding=2)
     self.max_pool1 = MaxPool2D(kernel_size=2, stride=2)
     self.conv2 = Conv2D(in_channels=6,
                         out_channels=16,
                         kernel_size=5,
                         stride=1,
                         padding=2)
     self.max_pool2 = MaxPool2D(kernel_size=2, stride=2)
     self.fc1 = Linear(in_features=50176, out_features=64)
     self.fc2 = Linear(in_features=64, out_features=num_classes)
Code Example #19
 def __init__(self, num_layers, mode='ir', opts=None):
     super(BackboneEncoderUsingLastLayerIntoWPlus, self).__init__()
     print('Using BackboneEncoderUsingLastLayerIntoWPlus')
     assert num_layers in [50, 100, 152], \
         'num_layers should be 50, 100 or 152'
     assert mode in ['ir', 'ir_se'], 'mode should be ir or ir_se'
     blocks = get_blocks(num_layers)
     if mode == 'ir':
         unit_module = bottleneck_IR
     elif mode == 'ir_se':
         unit_module = bottleneck_IR_SE
     self.input_layer = Sequential(
         Conv2D(opts.input_nc, 64, (3, 3), 1, 1, bias_attr=False),
         BatchNorm2D(64), PReLU(64))
     self.output_layer_2 = Sequential(BatchNorm2D(512),
                                      paddle.nn.AdaptiveAvgPool2D((7, 7)),
                                      Flatten(), Linear(512 * 7 * 7, 512))
     self.linear = EqualLinear(512, 512 * 18, lr_mul=1)
     modules = []
     for block in blocks:
         for bottleneck in block:
             modules.append(
                 unit_module(bottleneck.in_channel, bottleneck.depth,
                             bottleneck.stride))
     self.body = Sequential(*modules)
Code Example #20
    def __init__(self,
                 num_classes,
                 in_channels,
                 drop_ratio=0.5,
                 std=0.001,
                 data_format="NCHW",
                 **kwargs):
        super().__init__(num_classes,
                         in_channels,
                         drop_ratio=drop_ratio,
                         std=std,
                         data_format=data_format,
                         **kwargs)

        self.fc = Linear(self.in_channels,
                         self.num_classes,
                         weight_attr=ParamAttr(learning_rate=5.0,
                                               regularizer=L2Decay(1e-4)),
                         bias_attr=ParamAttr(learning_rate=10.0,
                                             regularizer=L2Decay(0.0)))

        assert (data_format in [
            'NCHW', 'NHWC'
        ]), f"data_format must be 'NCHW' or 'NHWC', but got {data_format}"

        self.data_format = data_format

        self.stdv = std
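
In Paddle, the learning_rate field of a ParamAttr is a per-parameter multiplier on the optimizer's base learning rate, so this head trains its weights at 5x and its bias at 10x the base rate, with L2 decay applied to the weights only. A minimal sketch of the same settings (the 2048 -> 400 sizes are illustrative):

from paddle import ParamAttr
from paddle.nn import Linear
from paddle.regularizer import L2Decay

head = Linear(2048, 400,
              weight_attr=ParamAttr(learning_rate=5.0,         # 5x base LR
                                    regularizer=L2Decay(1e-4)),
              bias_attr=ParamAttr(learning_rate=10.0,          # 10x base LR
                                  regularizer=L2Decay(0.0)))   # no decay
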
Code Example #21
File: pptsn_head.py Project: chajchaj/PaddleVideo
    def __init__(self,
                 num_classes,
                 in_channels,
                 loss_cfg=dict(name='CrossEntropyLoss'),
                 drop_ratio=0.4,
                 std=0.01,
                 data_format="NCHW",
                 fclr5=True,
                 **kwargs):

        super().__init__(num_classes, in_channels, loss_cfg, **kwargs)
        self.drop_ratio = drop_ratio
        self.std = std

        # NOTE: global pool performance
        self.avgpool2d = AdaptiveAvgPool2D((1, 1), data_format=data_format)

        if self.drop_ratio != 0:
            self.dropout = Dropout(p=self.drop_ratio)
        else:
            self.dropout = None
        self.fc = Linear(
            self.in_channels,
            self.num_classes,
            weight_attr=ParamAttr(learning_rate=5.0 if fclr5 else 1.0,
                                  regularizer=L2Decay(1e-4)),
            bias_attr=ParamAttr(learning_rate=10.0 if fclr5 else 1.0,
                                regularizer=L2Decay(0.0)))
Code Example #22
    def __init__(self, class_dim=1000):
        super(InceptionV4DY, self).__init__()
        self._inception_stem = InceptionStem()

        self._inceptionA_1 = InceptionA(name="1")
        self._inceptionA_2 = InceptionA(name="2")
        self._inceptionA_3 = InceptionA(name="3")
        self._inceptionA_4 = InceptionA(name="4")
        self._reductionA = ReductionA()

        self._inceptionB_1 = InceptionB(name="1")
        self._inceptionB_2 = InceptionB(name="2")
        self._inceptionB_3 = InceptionB(name="3")
        self._inceptionB_4 = InceptionB(name="4")
        self._inceptionB_5 = InceptionB(name="5")
        self._inceptionB_6 = InceptionB(name="6")
        self._inceptionB_7 = InceptionB(name="7")
        self._reductionB = ReductionB()

        self._inceptionC_1 = InceptionC(name="1")
        self._inceptionC_2 = InceptionC(name="2")
        self._inceptionC_3 = InceptionC(name="3")

        self.avg_pool = AdaptiveAvgPool2D(1)
        self._drop = Dropout(p=0.2, mode="downscale_in_infer")
        stdv = 1.0 / math.sqrt(1536 * 1.0)
        self.out = Linear(
            1536,
            class_dim,
            weight_attr=ParamAttr(
                initializer=Uniform(-stdv, stdv), name="final_fc_weights"),
            bias_attr=ParamAttr(name="final_fc_offset"))
Code Example #23
 def __init__(self,
              embed_dim,
              num_heads,
              dropout=0.,
              bias=True,
              add_bias_kv=False,
              add_zero_attn=False):
     super(MultiheadAttention, self).__init__()
     self.embed_dim = embed_dim
     self.num_heads = num_heads
     self.dropout = dropout
     self.head_dim = embed_dim // num_heads
     assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
     self.scaling = self.head_dim**-0.5
     self.out_proj = Linear(embed_dim, embed_dim, bias_attr=bias)
     self._reset_parameters()
     self.conv1 = paddle.nn.Conv2D(in_channels=embed_dim,
                                   out_channels=embed_dim,
                                   kernel_size=(1, 1))
     self.conv2 = paddle.nn.Conv2D(in_channels=embed_dim,
                                   out_channels=embed_dim,
                                   kernel_size=(1, 1))
     self.conv3 = paddle.nn.Conv2D(in_channels=embed_dim,
                                   out_channels=embed_dim,
                                   kernel_size=(1, 1))
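
The line self.scaling = self.head_dim ** -0.5 is the standard scaled dot-product attention factor 1 / sqrt(d_k): multiplying the queries (or the attention logits) by it keeps the logits well-conditioned as the per-head dimension grows.
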
Code Example #24
File: googlenet.py Project: wuhuachaocoding/Paddle
    def __init__(self, num_classes=1000, with_pool=True):
        super(GoogLeNet, self).__init__()
        self.num_classes = num_classes
        self.with_pool = with_pool

        self._conv = ConvLayer(3, 64, 7, 2)
        self._pool = MaxPool2D(kernel_size=3, stride=2)
        self._conv_1 = ConvLayer(64, 64, 1)
        self._conv_2 = ConvLayer(64, 192, 3)

        self._ince3a = Inception(192, 192, 64, 96, 128, 16, 32, 32)
        self._ince3b = Inception(256, 256, 128, 128, 192, 32, 96, 64)

        self._ince4a = Inception(480, 480, 192, 96, 208, 16, 48, 64)
        self._ince4b = Inception(512, 512, 160, 112, 224, 24, 64, 64)
        self._ince4c = Inception(512, 512, 128, 128, 256, 24, 64, 64)
        self._ince4d = Inception(512, 512, 112, 144, 288, 32, 64, 64)
        self._ince4e = Inception(528, 528, 256, 160, 320, 32, 128, 128)

        self._ince5a = Inception(832, 832, 256, 160, 320, 32, 128, 128)
        self._ince5b = Inception(832, 832, 384, 192, 384, 48, 128, 128)

        if with_pool:
            # out
            self._pool_5 = AdaptiveAvgPool2D(1)
            # out1
            self._pool_o1 = AvgPool2D(kernel_size=5, stride=3)
            # out2
            self._pool_o2 = AvgPool2D(kernel_size=5, stride=3)

        if num_classes > 0:
            # out
            self._drop = Dropout(p=0.4, mode="downscale_in_infer")
            self._fc_out = Linear(
                1024, num_classes, weight_attr=xavier(1024, 1))

            # out1
            self._conv_o1 = ConvLayer(512, 128, 1)
            self._fc_o1 = Linear(1152, 1024, weight_attr=xavier(2048, 1))
            self._drop_o1 = Dropout(p=0.7, mode="downscale_in_infer")
            self._out1 = Linear(1024, num_classes, weight_attr=xavier(1024, 1))

            # out2
            self._conv_o2 = ConvLayer(528, 128, 1)
            self._fc_o2 = Linear(1152, 1024, weight_attr=xavier(2048, 1))
            self._drop_o2 = Dropout(p=0.7, mode="downscale_in_infer")
            self._out2 = Linear(1024, num_classes, weight_attr=xavier(1024, 1))
Code Example #25
File: resnext.py Project: wuhuachaocoding/Paddle
    def __init__(self,
                 depth=50,
                 cardinality=32,
                 num_classes=1000,
                 with_pool=True):
        super(ResNeXt, self).__init__()

        self.depth = depth
        self.cardinality = cardinality
        self.num_classes = num_classes
        self.with_pool = with_pool

        supported_depth = [50, 101, 152]
        assert depth in supported_depth, \
            "supported layers are {} but input layer is {}".format(
                supported_depth, depth)
        supported_cardinality = [32, 64]
        assert cardinality in supported_cardinality, \
            "supported cardinality is {} but input cardinality is {}" \
            .format(supported_cardinality, cardinality)
        layer_cfg = {50: [3, 4, 6, 3], 101: [3, 4, 23, 3], 152: [3, 8, 36, 3]}
        layers = layer_cfg[depth]
        num_channels = [64, 256, 512, 1024]
        num_filters = [128, 256, 512, 1024
                       ] if cardinality == 32 else [256, 512, 1024, 2048]

        self.conv = ConvBNLayer(num_channels=3,
                                num_filters=64,
                                filter_size=7,
                                stride=2,
                                act='relu')
        self.pool2d_max = MaxPool2D(kernel_size=3, stride=2, padding=1)

        self.block_list = []
        for block in range(len(layers)):
            shortcut = False
            for i in range(layers[block]):
                bottleneck_block = self.add_sublayer(
                    'bb_%d_%d' % (block, i),
                    BottleneckBlock(num_channels=num_channels[block]
                                    if i == 0 else num_filters[block] *
                                    int(64 // self.cardinality),
                                    num_filters=num_filters[block],
                                    stride=2 if i == 0 and block != 0 else 1,
                                    cardinality=self.cardinality,
                                    shortcut=shortcut))
                self.block_list.append(bottleneck_block)
                shortcut = True

        if with_pool:
            self.pool2d_avg = AdaptiveAvgPool2D(1)

        if num_classes > 0:
            self.pool2d_avg_channels = num_channels[-1] * 2
            stdv = 1.0 / math.sqrt(self.pool2d_avg_channels * 1.0)
            self.out = Linear(
                self.pool2d_avg_channels,
                num_classes,
                weight_attr=ParamAttr(initializer=Uniform(-stdv, stdv)))
Code Example #26
File: Number.py Project: zl89298832/Deep_Learning
 def __init__(self, num_classes=1):
     super(MNIST, self).__init__()
     # Build the convolution and pooling layers
     # First convolution layer
     self.conv1 = Conv2D(in_channels=1, out_channels=6, kernel_size=5)
     self.max_pool1 = MaxPool2D(kernel_size=2, stride=2)
     # Shape logic: pooling does not change the channel count; channels are still 6
     # Second convolution layer
     self.conv2 = Conv2D(in_channels=6, out_channels=16, kernel_size=5)
     self.max_pool2 = MaxPool2D(kernel_size=2, stride=2)
     # Third convolution layer
     self.conv3 = Conv2D(in_channels=16, out_channels=120, kernel_size=4)
     # Shape logic: flatten [B, C, H, W] -> [B, C*H*W] before the FC layers
     # With a [28, 28] input, C*H*W equals 120 after three convolutions and two poolings
     self.fc1 = Linear(in_features=120, out_features=64)
     # FC layers: the first outputs 64 neurons, the second outputs one per class label
     self.fc2 = Linear(in_features=64, out_features=num_classes)
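
The shape comments check out: a 28x28 input becomes 24x24 after conv1 (kernel 5, no padding), 12x12 after pooling, 8x8 after conv2, 4x4 after pooling, and 1x1 with 120 channels after conv3 (kernel 4), so exactly 120 features reach fc1.
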
Code Example #27
    def __init__(self, class_dim=1000, scale=1.0, prefix_name="", **args):
        super(MobileNet, self).__init__()
        self.scale = scale
        self.class_dim = class_dim

        bottleneck_params_list = [
            (1, 16, 1, 1),
            (6, 24, 2, 2),
            (6, 32, 3, 2),
            (6, 64, 4, 2),
            (6, 96, 3, 1),
            (6, 160, 3, 2),
            (6, 320, 1, 1),
        ]

        self.conv1 = ConvBNLayer(
            num_channels=3,
            num_filters=int(32 * scale),
            filter_size=3,
            stride=2,
            padding=1,
            name=prefix_name + "conv1_1")

        self.block_list = []
        i = 1
        in_c = int(32 * scale)
        for layer_setting in bottleneck_params_list:
            t, c, n, s = layer_setting
            i += 1
            block = self.add_sublayer(
                prefix_name + "conv" + str(i),
                sublayer=InvresiBlocks(
                    in_c=in_c,
                    t=t,
                    c=int(c * scale),
                    n=n,
                    s=s,
                    name=prefix_name + "conv" + str(i)))
            self.block_list.append(block)
            in_c = int(c * scale)

        self.out_c = int(1280 * scale) if scale > 1.0 else 1280
        self.conv9 = ConvBNLayer(
            num_channels=in_c,
            num_filters=self.out_c,
            filter_size=1,
            stride=1,
            padding=0,
            name=prefix_name + "conv9")

        self.pool2d_avg = AdaptiveAvgPool2D(1)

        self.out = Linear(
            self.out_c,
            class_dim,
            weight_attr=ParamAttr(name=prefix_name + "fc10_weights"),
            bias_attr=ParamAttr(name=prefix_name + "fc10_offset"))
Code Example #28
    def __init__(self, num_channels, num_filters, reduction_ratio, name=None):
        super(SELayer, self).__init__()

        self.pool2d_gap = AdaptiveAvgPool2D(1)

        self._num_channels = num_channels

        med_ch = int(num_channels / reduction_ratio)
        stdv = 1.0 / math.sqrt(num_channels * 1.0)
        self.squeeze = Linear(
            num_channels,
            med_ch,
            weight_attr=ParamAttr(initializer=Uniform(-stdv, stdv)))

        stdv = 1.0 / math.sqrt(med_ch * 1.0)
        self.excitation = Linear(
            med_ch,
            num_filters,
            weight_attr=ParamAttr(initializer=Uniform(-stdv, stdv)))
Code Example #29
File: ghostnet.py Project: lvjian0706/PaddleClas
 def __init__(self, num_channels, reduction_ratio=4, name=None):
     super(SEBlock, self).__init__()
     self.pool2d_gap = AdaptiveAvgPool2D(1)
     self._num_channels = num_channels
     stdv = 1.0 / math.sqrt(num_channels * 1.0)
     med_ch = num_channels // reduction_ratio
     self.squeeze = Linear(num_channels,
                           med_ch,
                           weight_attr=ParamAttr(initializer=Uniform(
                               -stdv, stdv),
                                                 name=name + "_1_weights"),
                           bias_attr=ParamAttr(name=name + "_1_offset"))
     stdv = 1.0 / math.sqrt(med_ch * 1.0)
     self.excitation = Linear(med_ch,
                              num_channels,
                              weight_attr=ParamAttr(
                                  initializer=Uniform(-stdv, stdv),
                                  name=name + "_2_weights"),
                              bias_attr=ParamAttr(name=name + "_2_offset"))
Code Example #30
    def __init__(self,
                 num_classes,
                 in_channels,
                 loss_cfg=dict(name='CrossEntropyLoss'),
                 std=0.02,
                 **kwargs):

        super().__init__(num_classes, in_channels, loss_cfg, **kwargs)
        self.std = std
        self.fc = Linear(self.in_channels, self.num_classes)