Example #1
    def __init__(self, input_size, num_layers, out_dim, mode='ir'):
        super(Backbone, self).__init__()
        assert input_size[0] in [
            112, 224
        ], "input_size should be [112, 112] or [224, 224]"
        assert num_layers in [34, 50, 100, 152], \
            "num_layers should be 34, 50, 100 or 152"
        assert mode in ['ir', 'ir_se'], "mode should be ir or ir_se"
        blocks = get_blocks(num_layers)
        if mode == 'ir':
            unit_module = BottleneckIR
        elif mode == 'ir_se':
            unit_module = BottleneckIRSE
        self.input_layer = Sequential(
            Conv2D(3, 64, (3, 3), 1, 1, bias_attr=False), BatchNorm2D(64),
            PReLU(64))
        if input_size[0] == 112:
            self.output_layer = Sequential(BatchNorm2D(512), Dropout(),
                                           Flatten(),
                                           Linear(512 * 7 * 7, out_dim),
                                           BatchNorm1D(out_dim))
        else:
            self.output_layer = Sequential(BatchNorm2D(512), Dropout(),
                                           Flatten(),
                                           Linear(512 * 14 * 14, out_dim),
                                           BatchNorm1D(out_dim))

        modules = []
        for block in blocks:
            for bottleneck in block:
                modules.append(
                    unit_module(bottleneck.in_channel, bottleneck.depth,
                                bottleneck.stride))
        self.body = Sequential(*modules)
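A minimal usage sketch (an assumption: paddle plus the Backbone, get_blocks, and bottleneck classes above are importable from the surrounding module; the class's forward is not shown, so the three stages are chained by hand here):

import paddle

model = Backbone(input_size=[112, 112], num_layers=50, out_dim=512, mode='ir')
x = paddle.randn([1, 3, 112, 112])
feat = model.output_layer(model.body(model.input_layer(x)))  # shape [1, 512]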
Example #2
    def __init__(self,
                 d_model,
                 nhead,
                 dim_feedforward=2048,
                 attention_dropout_rate=0.0,
                 residual_dropout_rate=0.1):
        super(TransformerDecoderLayer, self).__init__()
        self.self_attn = MultiheadAttention(d_model,
                                            nhead,
                                            dropout=attention_dropout_rate)
        self.multihead_attn = MultiheadAttention(
            d_model, nhead, dropout=attention_dropout_rate)

        self.conv1 = Conv2D(in_channels=d_model,
                            out_channels=dim_feedforward,
                            kernel_size=(1, 1))
        self.conv2 = Conv2D(in_channels=dim_feedforward,
                            out_channels=d_model,
                            kernel_size=(1, 1))

        self.norm1 = LayerNorm(d_model)
        self.norm2 = LayerNorm(d_model)
        self.norm3 = LayerNorm(d_model)
        self.dropout1 = Dropout(residual_dropout_rate)
        self.dropout2 = Dropout(residual_dropout_rate)
        self.dropout3 = Dropout(residual_dropout_rate)
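Note that the feed-forward sublayer here is built from 1x1 convolutions rather than the usual pair of Linear layers; over a tensor laid out as (N, d_model, 1, seq_len), a 1x1 Conv2D is exactly a position-wise linear map. A quick check, assuming only paddle:

import paddle

x = paddle.randn([2, 512, 1, 10])           # (batch, d_model, 1, seq_len)
conv = paddle.nn.Conv2D(512, 2048, (1, 1))  # applied independently per position
print(conv(x).shape)                        # [2, 2048, 1, 10]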
Example #3
    def __init__(self, class_num=1000):
        super(AlexNetDY, self).__init__()

        stdv = 1.0 / math.sqrt(3 * 11 * 11)
        self._conv1 = ConvPoolLayer(
            3, 64, 11, 4, 2, stdv, act="relu", name="conv1")
        stdv = 1.0 / math.sqrt(64 * 5 * 5)
        self._conv2 = ConvPoolLayer(
            64, 192, 5, 1, 2, stdv, act="relu", name="conv2")
        stdv = 1.0 / math.sqrt(192 * 3 * 3)
        self._conv3 = Conv2D(
            192,
            384,
            3,
            stride=1,
            padding=1,
            weight_attr=ParamAttr(
                name="conv3_weights", initializer=Uniform(-stdv, stdv)),
            bias_attr=ParamAttr(
                name="conv3_offset", initializer=Uniform(-stdv, stdv)))
        stdv = 1.0 / math.sqrt(384 * 3 * 3)
        self._conv4 = Conv2D(
            384,
            256,
            3,
            stride=1,
            padding=1,
            weight_attr=ParamAttr(
                name="conv4_weights", initializer=Uniform(-stdv, stdv)),
            bias_attr=ParamAttr(
                name="conv4_offset", initializer=Uniform(-stdv, stdv)))
        stdv = 1.0 / math.sqrt(256 * 3 * 3)
        self._conv5 = ConvPoolLayer(
            256, 256, 3, 1, 1, stdv, act="relu", name="conv5")
        stdv = 1.0 / math.sqrt(256 * 6 * 6)

        self._drop1 = Dropout(p=0.5, mode="downscale_in_infer")
        self._fc6 = Linear(
            in_features=256 * 6 * 6,
            out_features=4096,
            weight_attr=ParamAttr(
                name="fc6_weights", initializer=Uniform(-stdv, stdv)),
            bias_attr=ParamAttr(
                name="fc6_offset", initializer=Uniform(-stdv, stdv)))

        self._drop2 = Dropout(p=0.5, mode="downscale_in_infer")
        self._fc7 = Linear(
            in_features=4096,
            out_features=4096,
            weight_attr=ParamAttr(
                name="fc7_weights", initializer=Uniform(-stdv, stdv)),
            bias_attr=ParamAttr(
                name="fc7_offset", initializer=Uniform(-stdv, stdv)))
        self._fc8 = Linear(
            in_features=4096,
            out_features=class_num,
            weight_attr=ParamAttr(
                name="fc8_weights", initializer=Uniform(-stdv, stdv)),
            bias_attr=ParamAttr(
                name="fc8_offset", initializer=Uniform(-stdv, stdv)))
Example #4
    def __init__(self,
                 d_model,
                 nhead,
                 dim_feedforward,
                 dropout=0.1,
                 activation="relu",
                 attn_dropout=None,
                 act_dropout=None,
                 normalize_before=False,
                 weight_attr=None,
                 bias_attr=None,
                 attention_type="bigbird",
                 block_size=1,
                 window_size=3,
                 num_global_blocks=1,
                 num_rand_blocks=1,
                 seed=None):
        self._config = locals()
        self._config.pop("self")
        self._config.pop("__class__", None)  # py3

        super(TransformerEncoderLayer, self).__init__()
        attn_dropout = dropout if attn_dropout is None else attn_dropout
        act_dropout = dropout if act_dropout is None else act_dropout
        self.normalize_before = normalize_before

        weight_attrs = _convert_param_attr_to_list(weight_attr, 2)
        bias_attrs = _convert_param_attr_to_list(bias_attr, 2)

        self.self_attn = MultiHeadAttention(
            d_model,
            nhead,
            dropout=attn_dropout,
            weight_attr=weight_attrs[0],
            bias_attr=bias_attrs[0],
            attention_type=attention_type,
            block_size=block_size,
            window_size=window_size,
            num_global_blocks=num_global_blocks,
            num_rand_blocks=num_rand_blocks,
            seed=seed)
        self.linear1 = Linear(d_model,
                              dim_feedforward,
                              weight_attrs[1],
                              bias_attr=bias_attrs[1])
        self.dropout = Dropout(act_dropout, mode="upscale_in_train")
        self.linear2 = Linear(dim_feedforward,
                              d_model,
                              weight_attrs[1],
                              bias_attr=bias_attrs[1])
        self.norm1 = LayerNorm(d_model, epsilon=1e-12)
        self.norm2 = LayerNorm(d_model, epsilon=1e-12)
        self.dropout1 = Dropout(dropout, mode="upscale_in_train")
        self.dropout2 = Dropout(dropout, mode="upscale_in_train")
        self.activation = getattr(F, activation)
        self.d_model = d_model
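The locals() capture at the top records every constructor argument so the layer can later be rebuilt from self._config. A standalone illustration of the same pattern (the Recorded class is hypothetical):

class Recorded:
    def __init__(self, d_model, nhead=8):
        self._config = locals()              # includes self and all arguments
        self._config.pop("self")
        self._config.pop("__class__", None)  # only present when super() is used

print(Recorded(512)._config)  # {'d_model': 512, 'nhead': 8}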
Example #5
    def __init__(self, num_classes=1000):
        super(AlexNet, self).__init__()
        self.num_classes = num_classes
        stdv = 1.0 / math.sqrt(3 * 11 * 11)
        self._conv1 = ConvPoolLayer(3, 64, 11, 4, 2, stdv, act="relu")
        stdv = 1.0 / math.sqrt(64 * 5 * 5)
        self._conv2 = ConvPoolLayer(64, 192, 5, 1, 2, stdv, act="relu")
        stdv = 1.0 / math.sqrt(192 * 3 * 3)
        self._conv3 = Conv2D(
            192,
            384,
            3,
            stride=1,
            padding=1,
            weight_attr=ParamAttr(initializer=Uniform(-stdv, stdv)),
            bias_attr=ParamAttr(initializer=Uniform(-stdv, stdv)))
        stdv = 1.0 / math.sqrt(384 * 3 * 3)
        self._conv4 = Conv2D(
            384,
            256,
            3,
            stride=1,
            padding=1,
            weight_attr=ParamAttr(initializer=Uniform(-stdv, stdv)),
            bias_attr=ParamAttr(initializer=Uniform(-stdv, stdv)))
        stdv = 1.0 / math.sqrt(256 * 3 * 3)
        self._conv5 = ConvPoolLayer(256, 256, 3, 1, 1, stdv, act="relu")

        if self.num_classes > 0:
            stdv = 1.0 / math.sqrt(256 * 6 * 6)
            self._drop1 = Dropout(p=0.5, mode="downscale_in_infer")
            self._fc6 = Linear(
                in_features=256 * 6 * 6,
                out_features=4096,
                weight_attr=ParamAttr(initializer=Uniform(-stdv, stdv)),
                bias_attr=ParamAttr(initializer=Uniform(-stdv, stdv)))

            self._drop2 = Dropout(p=0.5, mode="downscale_in_infer")
            self._fc7 = Linear(
                in_features=4096,
                out_features=4096,
                weight_attr=ParamAttr(initializer=Uniform(-stdv, stdv)),
                bias_attr=ParamAttr(initializer=Uniform(-stdv, stdv)))
            self._fc8 = Linear(
                in_features=4096,
                out_features=num_classes,
                weight_attr=ParamAttr(initializer=Uniform(-stdv, stdv)),
                bias_attr=ParamAttr(initializer=Uniform(-stdv, stdv)))
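A quick way to confirm the 256 * 6 * 6 flatten size, assuming 224x224 inputs and that each ConvPoolLayer follows its conv with a 3x3, stride-2 max pool (conv_out is a hypothetical helper, not part of the module):

def conv_out(n, k, s, p):
    # standard floor formula for conv/pool output size
    return (n + 2 * p - k) // s + 1

n = conv_out(224, 11, 4, 2)                  # conv1 -> 55
n = conv_out(n, 3, 2, 0)                     # pool  -> 27
n = conv_out(conv_out(n, 5, 1, 2), 3, 2, 0)  # conv2 + pool -> 13
for _ in range(3):
    n = conv_out(n, 3, 1, 1)                 # conv3..conv5 keep 13
n = conv_out(n, 3, 2, 0)                     # conv5 pool -> 6, so 256 * 6 * 6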
Example #6
    def __init__(self,
                 num_classes,
                 in_channels,
                 loss_cfg=dict(name='CrossEntropyLoss'),
                 drop_ratio=0.4,
                 std=0.01,
                 data_format="NCHW",
                 fclr5=True,
                 **kwargs):

        super().__init__(num_classes, in_channels, loss_cfg, **kwargs)
        self.drop_ratio = drop_ratio
        self.std = std

        # NOTE: global pool performance
        self.avgpool2d = AdaptiveAvgPool2D((1, 1), data_format=data_format)

        if self.drop_ratio != 0:
            self.dropout = Dropout(p=self.drop_ratio)
        else:
            self.dropout = None
        self.fc = Linear(
            self.in_channels,
            self.num_classes,
            weight_attr=ParamAttr(learning_rate=5.0 if fclr5 else 1.0,
                                  regularizer=L2Decay(1e-4)),
            bias_attr=ParamAttr(learning_rate=10.0 if fclr5 else 1.0,
                                regularizer=L2Decay(0.0)))
Example #7
    def __init__(self, layers=11, class_dim=1000):
        super(VGGNet, self).__init__()

        self.layers = layers
        self.vgg_configure = {
            11: [1, 1, 2, 2, 2],
            13: [2, 2, 2, 2, 2],
            16: [2, 2, 3, 3, 3],
            19: [2, 2, 4, 4, 4]
        }
        assert self.layers in self.vgg_configure.keys(), \
            "supported layers are {} but input layer is {}".format(
                self.vgg_configure.keys(), layers)
        self.groups = self.vgg_configure[self.layers]

        self._conv_block_1 = ConvBlock(3, 64, self.groups[0], name="conv1_")
        self._conv_block_2 = ConvBlock(64, 128, self.groups[1], name="conv2_")
        self._conv_block_3 = ConvBlock(128, 256, self.groups[2], name="conv3_")
        self._conv_block_4 = ConvBlock(256, 512, self.groups[3], name="conv4_")
        self._conv_block_5 = ConvBlock(512, 512, self.groups[4], name="conv5_")

        self._drop = Dropout(p=0.5, mode="downscale_in_infer")
        self._fc1 = Linear(7 * 7 * 512,
                           4096,
                           weight_attr=ParamAttr(name="fc6_weights"),
                           bias_attr=ParamAttr(name="fc6_offset"))
        self._fc2 = Linear(4096,
                           4096,
                           weight_attr=ParamAttr(name="fc7_weights"),
                           bias_attr=ParamAttr(name="fc7_offset"))
        self._out = Linear(4096,
                           class_dim,
                           weight_attr=ParamAttr(name="fc8_weights"),
                           bias_attr=ParamAttr(name="fc8_offset"))
Example #8
    def __init__(self, num_classes=59, backbone='resnet50'):
        super(PSPNet, self).__init__()

        # NOTE: the backbone argument is unused here; ResNet101 is hard-coded
        res = ResNet101(pretrained=False)
        # stem: res.conv, res.pool2d_max
        self.layer0 = Sequential(
            res.conv,
            res.pool2d_max
        )
        self.layer1 = res.layer1
        self.layer2 = res.layer2
        self.layer3 = res.layer3
        self.layer4 = res.layer4

        num_channels = 2048
        # psp: 2048 -> 2048*2
        self.pspmoduls = PSPModule(num_channels, [1, 2, 3, 6])
        num_channels *= 2
        # cls: 2048*2 -> 512 -> num_classes
        self.classifier = Sequential(
            Conv2D(num_channels, 512, kernel_size=3, padding=1),
            BatchNorm(512, act='relu'),
            Dropout(0.1),
            Conv2D(512, num_classes, kernel_size=1)
        )
Example #9
    def __init__(self, class_dim=1000):
        super(InceptionV4DY, self).__init__()
        self._inception_stem = InceptionStem()

        self._inceptionA_1 = InceptionA(name="1")
        self._inceptionA_2 = InceptionA(name="2")
        self._inceptionA_3 = InceptionA(name="3")
        self._inceptionA_4 = InceptionA(name="4")
        self._reductionA = ReductionA()

        self._inceptionB_1 = InceptionB(name="1")
        self._inceptionB_2 = InceptionB(name="2")
        self._inceptionB_3 = InceptionB(name="3")
        self._inceptionB_4 = InceptionB(name="4")
        self._inceptionB_5 = InceptionB(name="5")
        self._inceptionB_6 = InceptionB(name="6")
        self._inceptionB_7 = InceptionB(name="7")
        self._reductionB = ReductionB()

        self._inceptionC_1 = InceptionC(name="1")
        self._inceptionC_2 = InceptionC(name="2")
        self._inceptionC_3 = InceptionC(name="3")

        self.avg_pool = AdaptiveAvgPool2D(1)
        self._drop = Dropout(p=0.2, mode="downscale_in_infer")
        stdv = 1.0 / math.sqrt(1536 * 1.0)
        self.out = Linear(
            1536,
            class_dim,
            weight_attr=ParamAttr(
                initializer=Uniform(-stdv, stdv), name="final_fc_weights"),
            bias_attr=ParamAttr(name="final_fc_offset"))
Example #10
    def __init__(self,
                 config,
                 stop_grad_layers=0,
                 class_num=1000,
                 return_patterns=None):
        super().__init__()

        self.stop_grad_layers = stop_grad_layers

        self.conv_block_1 = ConvBlock(3, 64, config[0])
        self.conv_block_2 = ConvBlock(64, 128, config[1])
        self.conv_block_3 = ConvBlock(128, 256, config[2])
        self.conv_block_4 = ConvBlock(256, 512, config[3])
        self.conv_block_5 = ConvBlock(512, 512, config[4])

        self.relu = nn.ReLU()
        self.flatten = nn.Flatten(start_axis=1, stop_axis=-1)

        for idx, block in enumerate([
                self.conv_block_1, self.conv_block_2, self.conv_block_3,
                self.conv_block_4, self.conv_block_5
        ]):
            if self.stop_grad_layers >= idx + 1:
                for param in block.parameters():
                    param.trainable = False

        self.drop = Dropout(p=0.5, mode="downscale_in_infer")
        self.fc1 = Linear(7 * 7 * 512, 4096)
        self.fc2 = Linear(4096, 4096)
        self.fc3 = Linear(4096, class_num)
        if return_patterns is not None:
            self.update_res(return_patterns)
            self.register_forward_post_hook(self._return_dict_hook)
Example #11
    def __init__(self, num_classes, backbone_indices, backbone_channels,
                 gc_channels, ratio):

        super(Gcnhead, self).__init__()

        in_channels = backbone_channels[0]
        self.conv_bn_relu1 = M.layers.ConvBNReLU(in_channels=in_channels,
                                                 out_channels=gc_channels,
                                                 kernel_size=3,
                                                 padding=1)

        self.gc_block = M.GlobalContextBlock(in_channels=gc_channels,
                                             ratio=ratio)

        self.conv_bn_relu2 = M.layers.ConvBNReLU(in_channels=gc_channels,
                                                 out_channels=gc_channels,
                                                 kernel_size=3,
                                                 padding=1)

        self.conv_bn_relu3 = M.layers.ConvBNReLU(in_channels=in_channels +
                                                 gc_channels,
                                                 out_channels=gc_channels,
                                                 kernel_size=3,
                                                 padding=1)

        self.dropout = Dropout(p=0.1)

        self.conv = Conv2D(in_channels=gc_channels,
                           out_channels=num_classes,
                           kernel_size=1)

        self.backbone_indices = backbone_indices
Example #12
    def __init__(self, num_classes=1000, with_pool=True):
        super(GoogLeNet, self).__init__()
        self.num_classes = num_classes
        self.with_pool = with_pool

        self._conv = ConvLayer(3, 64, 7, 2)
        self._pool = MaxPool2D(kernel_size=3, stride=2)
        self._conv_1 = ConvLayer(64, 64, 1)
        self._conv_2 = ConvLayer(64, 192, 3)

        self._ince3a = Inception(192, 192, 64, 96, 128, 16, 32, 32)
        self._ince3b = Inception(256, 256, 128, 128, 192, 32, 96, 64)

        self._ince4a = Inception(480, 480, 192, 96, 208, 16, 48, 64)
        self._ince4b = Inception(512, 512, 160, 112, 224, 24, 64, 64)
        self._ince4c = Inception(512, 512, 128, 128, 256, 24, 64, 64)
        self._ince4d = Inception(512, 512, 112, 144, 288, 32, 64, 64)
        self._ince4e = Inception(528, 528, 256, 160, 320, 32, 128, 128)

        self._ince5a = Inception(832, 832, 256, 160, 320, 32, 128, 128)
        self._ince5b = Inception(832, 832, 384, 192, 384, 48, 128, 128)

        if with_pool:
            # out
            self._pool_5 = AdaptiveAvgPool2D(1)
            # out1
            self._pool_o1 = AvgPool2D(kernel_size=5, stride=3)
            # out2
            self._pool_o2 = AvgPool2D(kernel_size=5, stride=3)

        if num_classes > 0:
            # out
            self._drop = Dropout(p=0.4, mode="downscale_in_infer")
            self._fc_out = Linear(
                1024, num_classes, weight_attr=xavier(1024, 1))

            # out1
            self._conv_o1 = ConvLayer(512, 128, 1)
            self._fc_o1 = Linear(1152, 1024, weight_attr=xavier(2048, 1))
            self._drop_o1 = Dropout(p=0.7, mode="downscale_in_infer")
            self._out1 = Linear(1024, num_classes, weight_attr=xavier(1024, 1))

            # out2
            self._conv_o2 = ConvLayer(528, 128, 1)
            self._fc_o2 = Linear(1152, 1024, weight_attr=xavier(2048, 1))
            self._drop_o2 = Dropout(p=0.7, mode="downscale_in_infer")
            self._out2 = Linear(1024, num_classes, weight_attr=xavier(1024, 1))
Example #13
    def __init__(self, num_classes=1000, with_pool=True):
        super().__init__()
        self.num_classes = num_classes
        self.with_pool = with_pool
        self.layers_config = {
            "inception_a": [[192, 256, 288], [32, 64, 64]],
            "inception_b": [288],
            "inception_c": [[768, 768, 768, 768], [128, 160, 160, 192]],
            "inception_d": [768],
            "inception_e": [1280, 2048]
        }

        inception_a_list = self.layers_config["inception_a"]
        inception_c_list = self.layers_config["inception_c"]
        inception_b_list = self.layers_config["inception_b"]
        inception_d_list = self.layers_config["inception_d"]
        inception_e_list = self.layers_config["inception_e"]

        self.inception_stem = InceptionStem()

        self.inception_block_list = nn.LayerList()
        for i in range(len(inception_a_list[0])):
            inception_a = InceptionA(inception_a_list[0][i],
                                     inception_a_list[1][i])
            self.inception_block_list.append(inception_a)

        for i in range(len(inception_b_list)):
            inception_b = InceptionB(inception_b_list[i])
            self.inception_block_list.append(inception_b)

        for i in range(len(inception_c_list[0])):
            inception_c = InceptionC(inception_c_list[0][i],
                                     inception_c_list[1][i])
            self.inception_block_list.append(inception_c)

        for i in range(len(inception_d_list)):
            inception_d = InceptionD(inception_d_list[i])
            self.inception_block_list.append(inception_d)

        for i in range(len(inception_e_list)):
            inception_e = InceptionE(inception_e_list[i])
            self.inception_block_list.append(inception_e)

        if with_pool:
            self.avg_pool = AdaptiveAvgPool2D(1)

        if num_classes > 0:
            self.dropout = Dropout(p=0.2, mode="downscale_in_infer")
            stdv = 1.0 / math.sqrt(2048 * 1.0)
            self.fc = Linear(
                2048,
                num_classes,
                weight_attr=ParamAttr(initializer=Uniform(-stdv, stdv)),
                bias_attr=ParamAttr())
Example #14
    def __init__(self,
                 class_num=1000,
                 scale=1.0,
                 dropout_prob=0.2,
                 class_expand=1280):
        super().__init__()
        self.scale = scale
        self.class_num = class_num
        self.class_expand = class_expand
        stage_repeats = [3, 7, 3]
        stage_out_channels = [
            -1, 24,
            make_divisible(116 * scale),
            make_divisible(232 * scale),
            make_divisible(464 * scale), 1024
        ]

        self.conv1 = ConvBNLayer(in_channels=3,
                                 out_channels=stage_out_channels[1],
                                 kernel_size=3,
                                 stride=2)
        self.max_pool = MaxPool2D(kernel_size=3, stride=2, padding=1)

        block_list = []
        for stage_id, num_repeat in enumerate(stage_repeats):
            for i in range(num_repeat):
                if i == 0:
                    block = ESBlock2(
                        in_channels=stage_out_channels[stage_id + 1],
                        out_channels=stage_out_channels[stage_id + 2])
                else:
                    block = ESBlock1(
                        in_channels=stage_out_channels[stage_id + 2],
                        out_channels=stage_out_channels[stage_id + 2])
                block_list.append(block)
        self.blocks = nn.Sequential(*block_list)

        self.conv2 = ConvBNLayer(in_channels=stage_out_channels[-2],
                                 out_channels=stage_out_channels[-1],
                                 kernel_size=1)

        self.avg_pool = AdaptiveAvgPool2D(1)

        self.last_conv = Conv2D(in_channels=stage_out_channels[-1],
                                out_channels=self.class_expand,
                                kernel_size=1,
                                stride=1,
                                padding=0,
                                bias_attr=False)
        self.hardswish = nn.Hardswish()
        self.dropout = Dropout(p=dropout_prob, mode="downscale_in_infer")
        self.flatten = nn.Flatten(start_axis=1, stop_axis=-1)
        self.fc = Linear(self.class_expand, self.class_num)
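make_divisible comes from the surrounding module; a sketch of the widely used definition it presumably follows (round channel counts to a multiple of 8 without dropping below 90% of the request):

def make_divisible(v, divisor=8, min_value=None):
    if min_value is None:
        min_value = divisor
    # round to the nearest multiple of divisor, never below min_value
    new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
    if new_v < 0.9 * v:  # avoid shrinking the channel count by more than 10%
        new_v += divisor
    return new_v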
Example #15
    def __init__(self, num_classes=1):
        super(AlexNet, self).__init__()
        # Like LeNet, AlexNet extracts image features with stacked convolution
        # and pooling layers; unlike LeNet, the activation is changed to ReLU
        self.conv1 = Conv2D(in_channels=3,
                            out_channels=96,
                            kernel_size=11,
                            stride=4,
                            padding=5)
        self.max_pool1 = MaxPool2D(kernel_size=2, stride=2)
        self.conv2 = Conv2D(in_channels=96,
                            out_channels=256,
                            kernel_size=5,
                            stride=1,
                            padding=2)
        self.max_pool2 = MaxPool2D(kernel_size=2, stride=2)
        self.conv3 = Conv2D(in_channels=256,
                            out_channels=384,
                            kernel_size=3,
                            stride=1,
                            padding=1)
        self.conv4 = Conv2D(in_channels=384,
                            out_channels=384,
                            kernel_size=3,
                            stride=1,
                            padding=1)
        self.conv5 = Conv2D(in_channels=384,
                            out_channels=256,
                            kernel_size=3,
                            stride=1,
                            padding=1)
        self.max_pool5 = MaxPool2D(kernel_size=2, stride=2)

        self.fc1 = Linear(in_features=12544, out_features=4096)
        self.drop_ratio1 = 0.5
        self.drop1 = Dropout(self.drop_ratio1)
        self.fc2 = Linear(in_features=4096, out_features=4096)
        self.drop_ratio2 = 0.5
        self.drop2 = Dropout(self.drop_ratio2)
        self.fc3 = Linear(in_features=4096, out_features=num_classes)
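For reference, fc1's 12544 input features equal 256 * 7 * 7, which is consistent if 224x224 RGB inputs are assumed:

# conv1 k=11 s=4 p=5: 224 -> 56, max_pool1 k=2 s=2: 56 -> 28
# conv2 k=5 s=1 p=2:  28 -> 28,  max_pool2: 28 -> 14
# conv3..conv5 k=3 s=1 p=1: 14 -> 14, max_pool5: 14 -> 7
# flatten: 256 * 7 * 7 = 12544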
Example #16
    def __init__(self, version, num_classes=1000, with_pool=True):
        super(SqueezeNet, self).__init__()
        self.version = version
        self.num_classes = num_classes
        self.with_pool = with_pool

        supported_versions = ['1.0', '1.1']
        assert version in supported_versions, \
            "supported versions are {} but input version is {}".format(
                supported_versions, version)

        if self.version == "1.0":
            self._conv = Conv2D(
                3,
                96,
                7,
                stride=2,
                weight_attr=ParamAttr(),
                bias_attr=ParamAttr())
            self._pool = MaxPool2D(kernel_size=3, stride=2, padding=0)
            self._conv1 = MakeFire(96, 16, 64, 64)
            self._conv2 = MakeFire(128, 16, 64, 64)
            self._conv3 = MakeFire(128, 32, 128, 128)
            self._conv4 = MakeFire(256, 32, 128, 128)
            self._conv5 = MakeFire(256, 48, 192, 192)
            self._conv6 = MakeFire(384, 48, 192, 192)
            self._conv7 = MakeFire(384, 64, 256, 256)
            self._conv8 = MakeFire(512, 64, 256, 256)
        else:
            self._conv = Conv2D(
                3,
                64,
                3,
                stride=2,
                padding=1,
                weight_attr=ParamAttr(),
                bias_attr=ParamAttr())
            self._pool = MaxPool2D(kernel_size=3, stride=2, padding=0)
            self._conv1 = MakeFire(64, 16, 64, 64)
            self._conv2 = MakeFire(128, 16, 64, 64)
            self._conv3 = MakeFire(128, 32, 128, 128)
            self._conv4 = MakeFire(256, 32, 128, 128)
            self._conv5 = MakeFire(256, 48, 192, 192)
            self._conv6 = MakeFire(384, 48, 192, 192)
            self._conv7 = MakeFire(384, 64, 256, 256)
            self._conv8 = MakeFire(512, 64, 256, 256)

        self._drop = Dropout(p=0.5, mode="downscale_in_infer")
        self._conv9 = Conv2D(
            512, num_classes, 1, weight_attr=ParamAttr(), bias_attr=ParamAttr())
        self._avg_pool = AdaptiveAvgPool2D(1)
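The mode="downscale_in_infer" seen throughout these examples controls where dropout scaling happens; a small sketch of Paddle's two modes:

import paddle

# upscale_in_train:   out = x * mask / (1 - p) in train, out = x at inference
# downscale_in_infer: out = x * mask in train, out = x * (1 - p) at inference
drop = paddle.nn.Dropout(p=0.5, mode="downscale_in_infer")
drop.eval()
print(drop(paddle.ones([3])))  # [0.5, 0.5, 0.5] at inference time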
Example #17
    def __init__(self,
                 config,
                 stages_pattern,
                 class_num=1000,
                 return_patterns=None,
                 return_stages=None):
        super().__init__()

        self.inception_a_list = config["inception_a"]
        self.inception_c_list = config["inception_c"]
        self.inception_b_list = config["inception_b"]
        self.inception_d_list = config["inception_d"]
        self.inception_e_list = config["inception_e"]

        self.inception_stem = InceptionStem()

        self.inception_block_list = nn.LayerList()
        for i in range(len(self.inception_a_list[0])):
            inception_a = InceptionA(self.inception_a_list[0][i],
                                     self.inception_a_list[1][i])
            self.inception_block_list.append(inception_a)

        for i in range(len(self.inception_b_list)):
            inception_b = InceptionB(self.inception_b_list[i])
            self.inception_block_list.append(inception_b)

        for i in range(len(self.inception_c_list[0])):
            inception_c = InceptionC(self.inception_c_list[0][i],
                                     self.inception_c_list[1][i])
            self.inception_block_list.append(inception_c)

        for i in range(len(self.inception_d_list)):
            inception_d = InceptionD(self.inception_d_list[i])
            self.inception_block_list.append(inception_d)

        for i in range(len(self.inception_e_list)):
            inception_e = InceptionE(self.inception_e_list[i])
            self.inception_block_list.append(inception_e)

        self.avg_pool = AdaptiveAvgPool2D(1)
        self.dropout = Dropout(p=0.2, mode="downscale_in_infer")
        stdv = 1.0 / math.sqrt(2048 * 1.0)
        self.fc = Linear(
            2048,
            class_num,
            weight_attr=ParamAttr(initializer=Uniform(-stdv, stdv)),
            bias_attr=ParamAttr())

        super().init_res(stages_pattern,
                         return_patterns=return_patterns,
                         return_stages=return_stages)
Example #18
    def __init__(self, version, class_num=1000):
        super(SqueezeNet, self).__init__()
        self.version = version

        if self.version == "1.0":
            self._conv = Conv2D(3,
                                96,
                                7,
                                stride=2,
                                weight_attr=ParamAttr(name="conv1_weights"),
                                bias_attr=ParamAttr(name="conv1_offset"))
            self._pool = MaxPool2D(kernel_size=3, stride=2, padding=0)
            self._conv1 = MakeFire(96, 16, 64, 64, name="fire2")
            self._conv2 = MakeFire(128, 16, 64, 64, name="fire3")
            self._conv3 = MakeFire(128, 32, 128, 128, name="fire4")

            self._conv4 = MakeFire(256, 32, 128, 128, name="fire5")
            self._conv5 = MakeFire(256, 48, 192, 192, name="fire6")
            self._conv6 = MakeFire(384, 48, 192, 192, name="fire7")
            self._conv7 = MakeFire(384, 64, 256, 256, name="fire8")

            self._conv8 = MakeFire(512, 64, 256, 256, name="fire9")
        else:
            self._conv = Conv2D(3,
                                64,
                                3,
                                stride=2,
                                padding=1,
                                weight_attr=ParamAttr(name="conv1_weights"),
                                bias_attr=ParamAttr(name="conv1_offset"))
            self._pool = MaxPool2D(kernel_size=3, stride=2, padding=0)
            self._conv1 = MakeFire(64, 16, 64, 64, name="fire2")
            self._conv2 = MakeFire(128, 16, 64, 64, name="fire3")

            self._conv3 = MakeFire(128, 32, 128, 128, name="fire4")
            self._conv4 = MakeFire(256, 32, 128, 128, name="fire5")

            self._conv5 = MakeFire(256, 48, 192, 192, name="fire6")
            self._conv6 = MakeFire(384, 48, 192, 192, name="fire7")
            self._conv7 = MakeFire(384, 64, 256, 256, name="fire8")
            self._conv8 = MakeFire(512, 64, 256, 256, name="fire9")

        self._drop = Dropout(p=0.5, mode="downscale_in_infer")
        self._conv9 = Conv2D(512,
                             class_num,
                             1,
                             weight_attr=ParamAttr(name="conv10_weights"),
                             bias_attr=ParamAttr(name="conv10_offset"))
        self._avg_pool = AdaptiveAvgPool2D(1)
Example #19
    def __init__(self, input_size, block, layers, zero_init_residual=True):
        super(ResNet, self).__init__()
        assert input_size[0] in [
            112, 224
        ], "input_size should be [112, 112] or [224, 224]"
        self.inplanes = 64
        self.zero_init_residual = zero_init_residual
        self.conv1 = Conv2D(3,
                            64,
                            kernel_size=7,
                            stride=2,
                            padding=3,
                            weight_attr=paddle.ParamAttr(
                                initializer=nn.initializer.KaimingNormal()))
        self.bn1 = BatchNorm2D(
            64,
            weight_attr=paddle.ParamAttr(
                initializer=nn.initializer.Constant(value=1),
                regularizer=paddle.regularizer.L1Decay(0.0)),
            bias_attr=paddle.ParamAttr(
                initializer=nn.initializer.Constant(value=0),
                regularizer=paddle.regularizer.L1Decay(0.0)))
        self.relu = ReLU()
        self.maxpool = MaxPool2D(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)

        self.bn_o1 = BatchNorm2D(
            2048,
            weight_attr=paddle.ParamAttr(
                initializer=nn.initializer.Constant(value=0.0),
                regularizer=paddle.regularizer.L1Decay(0.0)),
            bias_attr=paddle.ParamAttr(
                regularizer=paddle.regularizer.L1Decay(0.0)))
        self.dropout = Dropout()
        if input_size[0] == 112:
            self.fc = Linear(2048 * 4 * 4, 512)
        else:
            self.fc = Linear(2048 * 8 * 8, 512)
        self.bn_o2 = BatchNorm1D(
            512,
            weight_attr=paddle.ParamAttr(
                initializer=nn.initializer.Constant(value=0.0),
                regularizer=paddle.regularizer.L1Decay(0.0)),
            bias_attr=paddle.ParamAttr(
                regularizer=paddle.regularizer.L1Decay(0.0)))
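The L1Decay(0.0) regularizers attached to the BatchNorm parameters are the usual Paddle idiom for exempting them from weight decay: a parameter-level regularizer takes precedence over the optimizer-level one, and a zero coefficient makes it a no-op. A minimal sketch of the same idiom:

import paddle

def no_decay():
    # parameter-level regularizer overrides the optimizer's; zero decay = exempt
    return paddle.ParamAttr(regularizer=paddle.regularizer.L1Decay(0.0))

bn = paddle.nn.BatchNorm2D(64, weight_attr=no_decay(), bias_attr=no_decay())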
Example #20
    def __init__(self, class_dim: int = 1000, load_checkpoint: str = None):
        super(InceptionV4, self).__init__()
        self._inception_stem = InceptionStem()

        self._inceptionA_1 = InceptionA(name="1")
        self._inceptionA_2 = InceptionA(name="2")
        self._inceptionA_3 = InceptionA(name="3")
        self._inceptionA_4 = InceptionA(name="4")
        self._reductionA = ReductionA()

        self._inceptionB_1 = InceptionB(name="1")
        self._inceptionB_2 = InceptionB(name="2")
        self._inceptionB_3 = InceptionB(name="3")
        self._inceptionB_4 = InceptionB(name="4")
        self._inceptionB_5 = InceptionB(name="5")
        self._inceptionB_6 = InceptionB(name="6")
        self._inceptionB_7 = InceptionB(name="7")
        self._reductionB = ReductionB()

        self._inceptionC_1 = InceptionC(name="1")
        self._inceptionC_2 = InceptionC(name="2")
        self._inceptionC_3 = InceptionC(name="3")

        self.avg_pool = AdaptiveAvgPool2D(1)
        self._drop = Dropout(p=0.2, mode="downscale_in_infer")
        stdv = 1.0 / math.sqrt(1536 * 1.0)
        self.out = Linear(
            1536,
            class_dim,
            weight_attr=ParamAttr(initializer=Uniform(-stdv, stdv), name="final_fc_weights"),
            bias_attr=ParamAttr(name="final_fc_offset"))

        if load_checkpoint is not None:
            model_dict = paddle.load(load_checkpoint)[0]
            self.set_dict(model_dict)
            print("load custom checkpoint success")

        else:
            checkpoint = os.path.join(self.directory, 'inceptionv4_imagenet.pdparams')
            if not os.path.exists(checkpoint):
                os.system(
                    'wget https://paddlehub.bj.bcebos.com/dygraph/image_classification/inceptionv4_imagenet.pdparams -O '
                    + checkpoint)
            model_dict = paddle.load(checkpoint)[0]
            self.set_dict(model_dict)
            print("load pretrained checkpoint success")
Example #21
    def __init__(self, num_channels, growth_rate, bn_size, dropout):
        super(DenseLayer, self).__init__()
        self.dropout = dropout

        self.bn_ac_func1 = BNACConvLayer(num_channels=num_channels,
                                         num_filters=bn_size * growth_rate,
                                         filter_size=1,
                                         pad=0,
                                         stride=1)

        self.bn_ac_func2 = BNACConvLayer(num_channels=bn_size * growth_rate,
                                         num_filters=growth_rate,
                                         filter_size=3,
                                         pad=1,
                                         stride=1)

        if dropout:
            self.dropout_func = Dropout(p=dropout, mode="downscale_in_infer")
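The forward pass is not shown; in a DenseNet-style layer it would typically run the two BN-act-conv stages, apply the optional dropout, and concatenate the result with the input. A hedged sketch of that flow (an assumption, not the module's actual code):

    def forward(self, x):
        y = self.bn_ac_func1(x)
        y = self.bn_ac_func2(y)
        if self.dropout:
            y = self.dropout_func(y)
        return paddle.concat([x, y], axis=1)  # dense connectivity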
Example #22
    def __init__(self, class_dim=1000):
        super(InceptionV3, self).__init__()
        self.inception_a_list = [[192, 256, 288], [32, 64, 64]]
        self.inception_c_list = [[768, 768, 768, 768], [128, 160, 160, 192]]

        self.inception_stem = InceptionStem()
        self.inception_block_list = []
        for i in range(len(self.inception_a_list[0])):
            inception_a = self.add_sublayer(
                "inception_a_" + str(i + 1),
                InceptionA(self.inception_a_list[0][i],
                           self.inception_a_list[1][i],
                           name=str(i + 1)))
            self.inception_block_list.append(inception_a)
        inception_b = self.add_sublayer("nception_b_1",
                                        InceptionB(288, name="1"))
        self.inception_block_list.append(inception_b)

        for i in range(len(self.inception_c_list[0])):
            inception_c = self.add_sublayer(
                "inception_c_" + str(i + 1),
                InceptionC(self.inception_c_list[0][i],
                           self.inception_c_list[1][i],
                           name=str(i + 1)))
            self.inception_block_list.append(inception_c)
        inception_d = self.add_sublayer("inception_d_1",
                                        InceptionD(768, name="1"))
        self.inception_block_list.append(inception_d)
        inception_e = self.add_sublayer("inception_e_1",
                                        InceptionE(1280, name="1"))
        self.inception_block_list.append(inception_e)
        inception_e = self.add_sublayer("inception_e_2",
                                        InceptionE(2048, name="2"))
        self.inception_block_list.append(inception_e)

        self.gap = AdaptiveAvgPool2D(1)
        self.drop = Dropout(p=0.2, mode="downscale_in_infer")
        stdv = 1.0 / math.sqrt(2048 * 1.0)
        self.out = Linear(2048,
                          class_dim,
                          weight_attr=ParamAttr(
                              initializer=Uniform(-stdv, stdv),
                              name="fc_weights"),
                          bias_attr=ParamAttr(name="fc_offset"))
Example #23
    def __init__(self,
                 num_classes,
                 in_channels,
                 loss_cfg=dict(name='CrossEntropyLoss'),
                 drop_ratio=0.4,
                 std=0.01,
                 **kwargs):

        super().__init__(num_classes, in_channels, loss_cfg, **kwargs)
        self.drop_ratio = drop_ratio
        self.std = std

        # NOTE: global pool performance
        self.avgpool2d = AdaptiveAvgPool2D((1, 1))

        if self.drop_ratio != 0:
            self.dropout = Dropout(p=self.drop_ratio)
        else:
            self.dropout = None

        self.fc = Linear(self.in_channels, self.num_classes)
Example #24
    def __init__(self,
                 dropout_prob: float = 0.2,
                 class_dim: int = 1000,
                 load_checkpoint: str = None):
        super(MobileNetV3Small, self).__init__()

        inplanes = 16
        self.cfg = [
            # k, exp, c,  se,     nl,  s,
            [3, 16, 16, True, "relu", 2],
            [3, 72, 24, False, "relu", 2],
            [3, 88, 24, False, "relu", 1],
            [5, 96, 40, True, "hard_swish", 2],
            [5, 240, 40, True, "hard_swish", 1],
            [5, 240, 40, True, "hard_swish", 1],
            [5, 120, 48, True, "hard_swish", 1],
            [5, 144, 48, True, "hard_swish", 1],
            [5, 288, 96, True, "hard_swish", 2],
            [5, 576, 96, True, "hard_swish", 1],
            [5, 576, 96, True, "hard_swish", 1],
        ]
        self.cls_ch_squeeze = 576
        self.cls_ch_expand = 1280

        self.conv1 = ConvBNLayer(in_c=3,
                                 out_c=make_divisible(inplanes),
                                 filter_size=3,
                                 stride=2,
                                 padding=1,
                                 num_groups=1,
                                 if_act=True,
                                 act="hard_swish",
                                 name="conv1")

        self.block_list = []
        inplanes = make_divisible(inplanes)
        for i, (k, exp, c, se, nl, s) in enumerate(self.cfg):
            self.block_list.append(
                ResidualUnit(in_c=inplanes,
                             mid_c=make_divisible(exp),
                             out_c=make_divisible(c),
                             filter_size=k,
                             stride=s,
                             use_se=se,
                             act=nl,
                             name="conv" + str(i + 2)))
            self.add_sublayer(sublayer=self.block_list[-1],
                              name="conv" + str(i + 2))
            inplanes = make_divisible(c)

        self.last_second_conv = ConvBNLayer(in_c=inplanes,
                                            out_c=make_divisible(
                                                self.cls_ch_squeeze),
                                            filter_size=1,
                                            stride=1,
                                            padding=0,
                                            num_groups=1,
                                            if_act=True,
                                            act="hard_swish",
                                            name="conv_last")

        self.pool = AdaptiveAvgPool2D(1)

        self.last_conv = Conv2D(
            in_channels=make_divisible(self.cls_ch_squeeze),
            out_channels=self.cls_ch_expand,
            kernel_size=1,
            stride=1,
            padding=0,
            weight_attr=ParamAttr(name="last_1x1_conv_weights"),
            bias_attr=False)

        self.dropout = Dropout(p=dropout_prob, mode="downscale_in_infer")

        self.out = Linear(self.cls_ch_expand,
                          class_dim,
                          weight_attr=ParamAttr("fc_weights"),
                          bias_attr=ParamAttr(name="fc_offset"))

        if load_checkpoint is not None:
            model_dict = paddle.load(load_checkpoint)[0]
            self.set_dict(model_dict)
            print("load custom checkpoint success")

        else:
            checkpoint = os.path.join(self.directory,
                                      'mobilenet_v3_small_ssld.pdparams')
            if not os.path.exists(checkpoint):
                os.system(
                    'wget https://paddlehub.bj.bcebos.com/dygraph/image_classification/mobilenet_v3_small_ssld.pdparams -O '
                    + checkpoint)
            model_dict = paddle.load(checkpoint)[0]
            self.set_dict(model_dict)
            print("load pretrained checkpoint success")
Example #25
    def __init__(self,
                 is_test: bool = True,
                 padding_type: str = 'DYNAMIC',
                 override_params: dict = None,
                 use_se: bool = False,
                 class_dim: int = 1000,
                 load_checkpoint: str = None):
        super(EfficientNet_B0, self).__init__()

        model_name = 'efficientnet-b0'
        self.name = "b0"
        self._block_args, self._global_params = get_model_params(model_name, override_params)
        self.padding_type = padding_type
        self.use_se = use_se
        self.is_test = is_test

        self._ef = ExtractFeatures(
            3,
            self._block_args,
            self._global_params,
            self.padding_type,
            self.use_se,
            self.is_test,
            model_name=self.name)

        output_channels = round_filters(1280, self._global_params)
        oup = 320

        self._conv = ConvBNLayer(
            oup,
            1,
            output_channels,
            bn_act="swish",
            padding_type=self.padding_type,
            name="",
            conv_name="_conv_head",
            bn_name="_bn1",
            model_name=self.name,
            cur_stage=7)
        self._pool = AdaptiveAvgPool2D(1)

        if self._global_params.dropout_rate:
            self._drop = Dropout(p=self._global_params.dropout_rate, mode="upscale_in_train")

        param_attr, bias_attr = init_fc_layer("_fc")
        self._fc = Linear(output_channels, class_dim, weight_attr=param_attr, bias_attr=bias_attr)

        if load_checkpoint is not None:
            model_dict = paddle.load(load_checkpoint)[0]
            self.set_dict(model_dict)
            print("load custom checkpoint success")

        else:
            checkpoint = os.path.join(self.directory, 'efficientnet_b0_small_imagenet.pdparams')
            if not os.path.exists(checkpoint):
                os.system(
                    'wget https://paddlehub.bj.bcebos.com/dygraph/image_classification/efficientnet_b0_small_imagenet.pdparams -O '
                    + checkpoint)
            model_dict = paddle.load(checkpoint)[0]
            self.set_dict(model_dict)
            print("load pretrained checkpoint success")
Example #26
    def __init__(self, args):
        super(CDRModel, self).__init__()
        self.args = args
        self.use_mut = args.use_mut
        self.use_gexp = args.use_gexp
        self.use_methy = args.use_methy
        self.units_list = args.units_list  # [256, 256, 256, 100]
        self.gnn_type = args.gnn_type  # 'gcn'
        self.act = args.act
        self.layer_num = args.layer_num  # 4
        self.pool_type = args.pool_type  # 'max'

        self.gnn_layers = paddle.nn.LayerList()
        self.bns = paddle.nn.LayerList()
        self.dropout = paddle.nn.LayerList()
        if self.gnn_type == 'gcn':
            for layer_id in range(self.layer_num):
                self.gnn_layers.append(
                    pgl.nn.GCNConv(self._get_in_size(layer_id),
                                   self.units_list[layer_id],
                                   activation=self.act))
                bn = BatchNorm1D(self.units_list[layer_id], data_format='NC')
                self.bns.append(bn)
                dp = Dropout(0.2)
                self.dropout.append(dp)
            self.graph_pooling = pgl.nn.GraphPool(self.pool_type)
            self.linear = Linear(self.units_list[self.layer_num - 1] + 300,
                                 300)

        elif self.gnn_type == 'gin':
            for layer_id in range(self.layer_num):
                self.gnn_layers.append(
                    pgl.nn.GINConv(self._get_in_size(layer_id),
                                   self.units_list[layer_id],
                                   activation=self.act))
                dp = Dropout(0.2)
                self.dropout.append(dp)
            self.graph_pooling = pgl.nn.GraphPool(self.pool_type)
            self.linear = Linear(self.units_list[self.layer_num - 1] + 300,
                                 300)

        elif self.gnn_type == 'graphsage':
            for layer_id in range(self.layer_num):
                self.gnn_layers.append(
                    pgl.nn.GraphSageConv(self._get_in_size(layer_id),
                                         self.units_list[layer_id]))
                dp = Dropout(0.2)
                self.dropout.append(dp)
            self.graph_pooling = pgl.nn.GraphPool(self.pool_type)
            self.linear = Linear(self.units_list[self.layer_num - 1] + 300,
                                 300)

        self.MP2 = MaxPool2D(kernel_size=(1, 5), data_format='NHWC')
        self.MP3 = MaxPool2D(kernel_size=(1, 10), data_format='NHWC')
        self.MP4 = MaxPool2D(kernel_size=(1, 2), data_format='NHWC')
        self.MP5 = MaxPool2D(kernel_size=(1, 3), data_format='NHWC')
        self.MP6 = MaxPool2D(kernel_size=(1, 3), data_format='NHWC')

        self.Conv1 = Conv2D(in_channels=1,
                            out_channels=50,
                            kernel_size=(1, 700),
                            stride=(1, 5),
                            padding='valid',
                            data_format='NHWC')
        self.Conv2 = Conv2D(in_channels=50,
                            out_channels=30,
                            kernel_size=(1, 5),
                            stride=(1, 2),
                            padding='valid',
                            data_format='NHWC')
        self.Conv3 = Conv2D(in_channels=1,
                            out_channels=30,
                            kernel_size=(1, 150),
                            stride=(1, 1),
                            padding='VALID',
                            data_format='NHWC')
        self.Conv4 = Conv2D(in_channels=30,
                            out_channels=10,
                            kernel_size=(1, 5),
                            stride=(1, 1),
                            padding='VALID',
                            data_format='NHWC')
        self.Conv5 = Conv2D(in_channels=10,
                            out_channels=5,
                            kernel_size=(1, 5),
                            stride=(1, 1),
                            padding='VALID',
                            data_format='NHWC')

        self.fc1 = Linear(2010, 100)
        self.fc2 = Linear(697, 256)
        self.fc3 = Linear(256, 100)
        self.fc4 = Linear(808, 256)
        self.fc5 = Linear(256, 100)
        self.fc6 = Linear(30, 1)

        self.tanhs = paddle.nn.LayerList([paddle.nn.Tanh() for _ in range(8)])
        self.relus = paddle.nn.LayerList([paddle.nn.ReLU() for _ in range(8)])
        self.dropout1 = paddle.nn.LayerList([Dropout(0.1) for _ in range(8)])
        self.dropout2 = Dropout(0.2)

        self.flat = paddle.nn.LayerList([Flatten() for _ in range(5)])
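Wrapping the repeated activations and dropouts in paddle.nn.LayerList, rather than a plain Python list, registers each entry as a sublayer so it is tracked by the model; a tiny illustration:

import paddle

drops = paddle.nn.LayerList([paddle.nn.Dropout(0.1) for _ in range(8)])
print(len(drops))                  # 8 registered sublayers
y = drops[0](paddle.ones([2, 2]))  # entries are indexable and callable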
Example #27
    def __init__(self,
                 config,
                 stages_pattern,
                 scale=1.0,
                 class_num=1000,
                 inplanes=STEM_CONV_NUMBER,
                 class_squeeze=LAST_SECOND_CONV_LARGE,
                 class_expand=LAST_CONV,
                 dropout_prob=0.2,
                 return_patterns=None,
                 return_stages=None):
        super().__init__()

        self.cfg = config
        self.scale = scale
        self.inplanes = inplanes
        self.class_squeeze = class_squeeze
        self.class_expand = class_expand
        self.class_num = class_num

        self.conv = ConvBNLayer(
            in_c=3,
            out_c=_make_divisible(self.inplanes * self.scale),
            filter_size=3,
            stride=2,
            padding=1,
            num_groups=1,
            if_act=True,
            act="hardswish")

        self.blocks = nn.Sequential(* [
            ResidualUnit(
                in_c=_make_divisible(self.inplanes * self.scale if i == 0 else
                                     self.cfg[i - 1][2] * self.scale),
                mid_c=_make_divisible(self.scale * exp),
                out_c=_make_divisible(self.scale * c),
                filter_size=k,
                stride=s,
                use_se=se,
                act=act) for i, (k, exp, c, se, act, s) in enumerate(self.cfg)
        ])

        self.last_second_conv = ConvBNLayer(
            in_c=_make_divisible(self.cfg[-1][2] * self.scale),
            out_c=_make_divisible(self.scale * self.class_squeeze),
            filter_size=1,
            stride=1,
            padding=0,
            num_groups=1,
            if_act=True,
            act="hardswish")

        self.avg_pool = AdaptiveAvgPool2D(1)

        self.last_conv = Conv2D(
            in_channels=_make_divisible(self.scale * self.class_squeeze),
            out_channels=self.class_expand,
            kernel_size=1,
            stride=1,
            padding=0,
            bias_attr=False)

        self.hardswish = nn.Hardswish()
        if dropout_prob is not None:
            self.dropout = Dropout(p=dropout_prob, mode="downscale_in_infer")
        else:
            self.dropout = None
        self.flatten = nn.Flatten(start_axis=1, stop_axis=-1)

        self.fc = Linear(self.class_expand, class_num)

        super().init_res(
            stages_pattern,
            return_patterns=return_patterns,
            return_stages=return_stages)
Example #28
    def __init__(self, class_num=1000):
        super(GoogLeNetDY, self).__init__()
        self._conv = ConvLayer(3, 64, 7, 2, name="conv1")
        self._pool = MaxPool2D(kernel_size=3, stride=2)
        self._conv_1 = ConvLayer(64, 64, 1, name="conv2_1x1")
        self._conv_2 = ConvLayer(64, 192, 3, name="conv2_3x3")

        self._ince3a = Inception(192,
                                 192,
                                 64,
                                 96,
                                 128,
                                 16,
                                 32,
                                 32,
                                 name="ince3a")
        self._ince3b = Inception(256,
                                 256,
                                 128,
                                 128,
                                 192,
                                 32,
                                 96,
                                 64,
                                 name="ince3b")

        self._ince4a = Inception(480,
                                 480,
                                 192,
                                 96,
                                 208,
                                 16,
                                 48,
                                 64,
                                 name="ince4a")
        self._ince4b = Inception(512,
                                 512,
                                 160,
                                 112,
                                 224,
                                 24,
                                 64,
                                 64,
                                 name="ince4b")
        self._ince4c = Inception(512,
                                 512,
                                 128,
                                 128,
                                 256,
                                 24,
                                 64,
                                 64,
                                 name="ince4c")
        self._ince4d = Inception(512,
                                 512,
                                 112,
                                 144,
                                 288,
                                 32,
                                 64,
                                 64,
                                 name="ince4d")
        self._ince4e = Inception(528,
                                 528,
                                 256,
                                 160,
                                 320,
                                 32,
                                 128,
                                 128,
                                 name="ince4e")

        self._ince5a = Inception(832,
                                 832,
                                 256,
                                 160,
                                 320,
                                 32,
                                 128,
                                 128,
                                 name="ince5a")
        self._ince5b = Inception(832,
                                 832,
                                 384,
                                 192,
                                 384,
                                 48,
                                 128,
                                 128,
                                 name="ince5b")

        self._pool_5 = AdaptiveAvgPool2D(1)

        self._drop = Dropout(p=0.4, mode="downscale_in_infer")
        self._fc_out = Linear(1024,
                              class_num,
                              weight_attr=xavier(1024, 1, "out"),
                              bias_attr=ParamAttr(name="out_offset"))
        # Auxiliary classifier 1, fed from the 512-channel ince4a output.
        self._pool_o1 = AvgPool2D(kernel_size=5, stride=3)
        self._conv_o1 = ConvLayer(512, 128, 1, name="conv_o1")
        self._fc_o1 = Linear(1152,
                             1024,
                             weight_attr=xavier(2048, 1, "fc_o1"),
                             bias_attr=ParamAttr(name="fc_o1_offset"))
        self._drop_o1 = Dropout(p=0.7, mode="downscale_in_infer")
        self._out1 = Linear(1024,
                            class_num,
                            weight_attr=xavier(1024, 1, "out1"),
                            bias_attr=ParamAttr(name="out1_offset"))
        # Auxiliary classifier 2, fed from the 528-channel ince4d output.
        self._pool_o2 = AvgPool2D(kernel_size=5, stride=3)
        self._conv_o2 = ConvLayer(528, 128, 1, name="conv_o2")
        self._fc_o2 = Linear(1152,
                             1024,
                             weight_attr=xavier(2048, 1, "fc_o2"),
                             bias_attr=ParamAttr(name="fc_o2_offset"))
        self._drop_o2 = Dropout(p=0.7, mode="downscale_in_infer")
        self._out2 = Linear(1024,
                            class_num,
                            weight_attr=xavier(1024, 1, "out2"),
                            bias_attr=ParamAttr(name="out2_offset"))
Example #29
0
    def __init__(self, backbone, class_dim=1000):
        super(XceptionDeeplab, self).__init__()

        bottleneck_params = gen_bottleneck_params(backbone)
        self.backbone = backbone

        self._conv1 = ConvBNLayer(
            3,
            32,
            3,
            stride=2,
            padding=1,
            act="relu",
            name=self.backbone + "/entry_flow/conv1")
        self._conv2 = ConvBNLayer(
            32,
            64,
            3,
            stride=1,
            padding=1,
            act="relu",
            name=self.backbone + "/entry_flow/conv2")

        self.block_num = bottleneck_params["entry_flow"][0]
        self.strides = bottleneck_params["entry_flow"][1]
        self.chns = bottleneck_params["entry_flow"][2]
        self.strides = check_data(self.strides, self.block_num)
        self.chns = check_data(self.chns, self.block_num)

        self.entry_flow = []
        self.middle_flow = []

        # The stem (conv1, stride 2) has already downsampled the input by 2.
        self.stride = 2
        self.output_stride = 32
        s = self.stride

        for i in range(self.block_num):
            # Cap the block stride so the accumulated stride never exceeds
            # the requested output_stride.
            stride = self.strides[i] if check_stride(s * self.strides[i],
                                                     self.output_stride) else 1
            xception_block = self.add_sublayer(
                self.backbone + "/entry_flow/block" + str(i + 1),
                Xception_Block(
                    input_channels=64 if i == 0 else self.chns[i - 1],
                    output_channels=self.chns[i],
                    strides=[1, 1, stride],
                    name=self.backbone + "/entry_flow/block" + str(i + 1)))
            self.entry_flow.append(xception_block)
            s = s * stride
        self.stride = s

        self.block_num = bottleneck_params["middle_flow"][0]
        self.strides = bottleneck_params["middle_flow"][1]
        self.chns = bottleneck_params["middle_flow"][2]
        self.strides = check_data(self.strides, self.block_num)
        self.chns = check_data(self.chns, self.block_num)
        s = self.stride

        for i in range(self.block_num):
            # Same stride-capping rule as the entry flow.
            stride = self.strides[i] if check_stride(s * self.strides[i],
                                                     self.output_stride) else 1
            xception_block = self.add_sublayer(
                self.backbone + "/middle_flow/block" + str(i + 1),
                Xception_Block(
                    input_channels=728,
                    output_channels=728,
                    strides=[1, 1, stride],
                    skip_conv=False,
                    name=self.backbone + "/middle_flow/block" + str(i + 1)))
            self.middle_flow.append(xception_block)
            s = s * stride
        self.stride = s

        self.block_num = bottleneck_params["exit_flow"][0]
        self.strides = bottleneck_params["exit_flow"][1]
        self.chns = bottleneck_params["exit_flow"][2]
        self.strides = check_data(self.strides, self.block_num)
        self.chns = check_data(self.chns, self.block_num)
        s = self.stride
        stride = self.strides[0] if check_stride(s * self.strides[0],
                                                 self.output_stride) else 1
        self._exit_flow_1 = Xception_Block(
            728,
            self.chns[0], [1, 1, stride],
            name=self.backbone + "/exit_flow/block1")
        s = s * stride
        stride = self.strides[1] if check_stride(s * self.strides[1],
                                                 self.output_stride) else 1
        self._exit_flow_2 = Xception_Block(
            self.chns[0][-1],
            self.chns[1], [1, 1, stride],
            dilation=2,
            has_skip=False,
            activation_fn_in_separable_conv=True,
            name=self.backbone + "/exit_flow/block2")
        s = s * stride

        self.stride = s

        self._drop = Dropout(p=0.5, mode="downscale_in_infer")
        self._pool = AdaptiveAvgPool2D(1)
        self._fc = Linear(
            self.chns[1][-1],
            class_dim,
            weight_attr=ParamAttr(name="fc_weights"),
            bias_attr=ParamAttr(name="fc_bias"))
Example #30
0
    def __init__(self,
                 scale=1.0,
                 model_name="small",
                 dropout_prob=0.2,
                 class_dim=1000):
        super(MobileNetV3, self).__init__()

        inplanes = 16
        if model_name == "large":
            self.cfg = [
                # kernel, expand_ch, out_ch, use_se, act, stride
                [3, 16, 16, False, "relu", 1],
                [3, 64, 24, False, "relu", 2],
                [3, 72, 24, False, "relu", 1],
                [5, 72, 40, True, "relu", 2],
                [5, 120, 40, True, "relu", 1],
                [5, 120, 40, True, "relu", 1],
                [3, 240, 80, False, "hardswish", 2],
                [3, 200, 80, False, "hardswish", 1],
                [3, 184, 80, False, "hardswish", 1],
                [3, 184, 80, False, "hardswish", 1],
                [3, 480, 112, True, "hardswish", 1],
                [3, 672, 112, True, "hardswish", 1],
                [5, 672, 160, True, "hardswish", 2],
                [5, 960, 160, True, "hardswish", 1],
                [5, 960, 160, True, "hardswish", 1],
            ]
            self.cls_ch_squeeze = 960
            self.cls_ch_expand = 1280
        elif model_name == "small":
            self.cfg = [
                # kernel, expand_ch, out_ch, use_se, act, stride
                [3, 16, 16, True, "relu", 2],
                [3, 72, 24, False, "relu", 2],
                [3, 88, 24, False, "relu", 1],
                [5, 96, 40, True, "hardswish", 2],
                [5, 240, 40, True, "hardswish", 1],
                [5, 240, 40, True, "hardswish", 1],
                [5, 120, 48, True, "hardswish", 1],
                [5, 144, 48, True, "hardswish", 1],
                [5, 288, 96, True, "hardswish", 2],
                [5, 576, 96, True, "hardswish", 1],
                [5, 576, 96, True, "hardswish", 1],
            ]
            self.cls_ch_squeeze = 576
            self.cls_ch_expand = 1280
        else:
            raise NotImplementedError(
                "model_name [{}] is not implemented!".format(model_name))

        self.conv1 = ConvBNLayer(in_c=3,
                                 out_c=make_divisible(inplanes * scale),
                                 filter_size=3,
                                 stride=2,
                                 padding=1,
                                 num_groups=1,
                                 if_act=True,
                                 act="hardswish",
                                 name="conv1")

        self.block_list = []
        inplanes = make_divisible(inplanes * scale)
        for i, (k, exp, c, se, nl, s) in enumerate(self.cfg):
            block = self.add_sublayer(
                "conv" + str(i + 2),
                ResidualUnit(in_c=inplanes,
                             mid_c=make_divisible(scale * exp),
                             out_c=make_divisible(scale * c),
                             filter_size=k,
                             stride=s,
                             use_se=se,
                             act=nl,
                             name="conv" + str(i + 2)))
            self.block_list.append(block)
            inplanes = make_divisible(scale * c)

        self.last_second_conv = ConvBNLayer(in_c=inplanes,
                                            out_c=make_divisible(
                                                scale * self.cls_ch_squeeze),
                                            filter_size=1,
                                            stride=1,
                                            padding=0,
                                            num_groups=1,
                                            if_act=True,
                                            act="hardswish",
                                            name="conv_last")

        self.pool = AdaptiveAvgPool2D(1)

        self.last_conv = Conv2D(
            in_channels=make_divisible(scale * self.cls_ch_squeeze),
            out_channels=self.cls_ch_expand,
            kernel_size=1,
            stride=1,
            padding=0,
            weight_attr=ParamAttr(name="last_1x1_conv_weights"),
            bias_attr=False)

        self.dropout = Dropout(p=dropout_prob, mode="downscale_in_infer")

        self.out = Linear(self.cls_ch_expand,
                          class_dim,
                          weight_attr=ParamAttr("fc_weights"),
                          bias_attr=ParamAttr(name="fc_offset"))