def __init__(self, num_layers, mode='ir', opts=None):
     super(BackboneEncoderUsingLastLayerIntoWPlus, self).__init__()
     print('Using BackboneEncoderUsingLastLayerIntoWPlus')
     assert num_layers in [50, 100,
                           152], 'num_layers should be 50, 100, or 152'
     assert mode in ['ir', 'ir_se'], 'mode should be ir or ir_se'
     blocks = get_blocks(num_layers)
     if mode == 'ir':
         unit_module = bottleneck_IR
     elif mode == 'ir_se':
         unit_module = bottleneck_IR_SE
     self.input_layer = Sequential(
         Conv2D(opts.input_nc, 64, (3, 3), 1, 1, bias_attr=False),
         BatchNorm2D(64), PReLU(64))
     self.output_layer_2 = Sequential(BatchNorm2D(512),
                                      paddle.nn.AdaptiveAvgPool2D((7, 7)),
                                      Flatten(), Linear(512 * 7 * 7, 512))
     self.linear = EqualLinear(512, 512 * 18, lr_mul=1)
     modules = []
     for block in blocks:
         for bottleneck in block:
             modules.append(
                 unit_module(bottleneck.in_channel, bottleneck.depth,
                             bottleneck.stride))
     self.body = Sequential(*modules)
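
For reference, a minimal forward sketch for this encoder (the listing only shows __init__; the wiring below follows the usual pSp pattern and the 18 x 512 reshape implied by self.linear, so treat it as an assumption):

    def forward(self, x):
        x = self.input_layer(x)
        x = self.body(x)
        x = self.output_layer_2(x)       # -> [N, 512]
        x = self.linear(x)               # -> [N, 512 * 18]
        return x.reshape([-1, 18, 512])  # one w vector per style layer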
Example #2
    def __init__(self, num_classes=59, backbone='resnet50'):
        super(PSPNet, self).__init__()

        res = ResNet101(pretrained=False)  # note: the backbone argument above is unused
        # stem: res.conv, res.pool2d_max
        self.layer0 = Sequential(
            res.conv,
            res.pool2d_max
        )
        self.layer1 = res.layer1
        self.layer2 = res.layer2
        self.layer3 = res.layer3
        self.layer4 = res.layer4

        num_channels = 2048
        # psp: 2048 -> 2048 * 2
        self.pspmodule = PSPModule(num_channels, [1, 2, 3, 6])
        num_channels *= 2
        # cls: 2048*2 -> 512 -> num_classes
        self.classifier = Sequential(
            Conv2D(num_channels, 512, kernel_size=3, padding=1),
            BatchNorm(512, act='relu'),
            Dropout(0.1),
            Conv2D(512, num_classes, kernel_size=1)
        )
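
A possible forward pass (hedged: the snippet stops at __init__; the bilinear upsample back to the input size and the channel doubling inside PSPModule are assumptions consistent with the comments above):

    def forward(self, inputs):
        h, w = inputs.shape[2:]
        x = self.layer0(inputs)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.pspmodule(x)   # 2048 -> 2048 * 2 channels
        x = self.classifier(x)  # -> num_classes channels
        return paddle.nn.functional.interpolate(x, size=(h, w),
                                                mode='bilinear')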
Example #3
File: iresnet.py  Project: bilylee/DCQ
    def __init__(self, input_size, num_layers, out_dim, mode='ir'):
        super(Backbone, self).__init__()
        assert input_size[0] in [
            112, 224
        ], "input_size should be [112, 112] or [224, 224]"
        assert num_layers in [
            34, 50, 100, 152
        ], "num_layers should be 34, 50, 100, or 152"
        assert mode in ['ir', 'ir_se'], "mode should be ir or ir_se"
        blocks = get_blocks(num_layers)
        if mode == 'ir':
            unit_module = BottleneckIR
        elif mode == 'ir_se':
            unit_module = BottleneckIRSE
        self.input_layer = Sequential(
            Conv2D(3, 64, (3, 3), 1, 1, bias_attr=False), BatchNorm2D(64),
            PReLU(64))
        if input_size[0] == 112:
            self.output_layer = Sequential(BatchNorm2D(512), Dropout(),
                                           Flatten(),
                                           Linear(512 * 7 * 7, out_dim),
                                           BatchNorm1D(out_dim))
        else:
            self.output_layer = Sequential(BatchNorm2D(512), Dropout(),
                                           Flatten(),
                                           Linear(512 * 14 * 14, out_dim),
                                           BatchNorm1D(out_dim))

        modules = []
        for block in blocks:
            for bottleneck in block:
                modules.append(
                    unit_module(bottleneck.in_channel, bottleneck.depth,
                                bottleneck.stride))
        self.body = Sequential(*modules)
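
A forward sketch for this face-recognition backbone (an assumption; the original project may also L2-normalize the embedding):

    def forward(self, x):
        x = self.input_layer(x)      # [N, 3, 112, 112] -> [N, 64, 112, 112]
        x = self.body(x)             # IR blocks, down to 512 x 7 x 7 for 112 input
        return self.output_layer(x)  # -> [N, out_dim]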
Example #4
    def __init__(self, num_classes=10, classifier_activation='softmax'):
        super(LeNet, self).__init__()
        self.num_classes = num_classes
        self.features = Sequential(Conv2d(1, 6, 3, stride=1,
                                          padding=1), ReLU(),
                                   Pool2D(2, 'max', 2),
                                   Conv2d(6, 16, 5, stride=1, padding=0),
                                   ReLU(), Pool2D(2, 'max', 2))

        if num_classes > 0:
            self.fc = Sequential(Linear(400, 120), Linear(120, 84),
                                 Linear(84, num_classes),
                                 Softmax())  # TODO: accept any activation
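
A forward sketch assuming 28x28 MNIST input, which is what makes the first Linear's 16 * 5 * 5 = 400 input features line up (the flatten call is an assumption matching the snippet's fluid-era API):

    def forward(self, inputs):
        x = self.features(inputs)                       # -> [N, 16, 5, 5]
        if self.num_classes > 0:
            x = paddle.fluid.layers.flatten(x, axis=1)  # -> [N, 400]
            x = self.fc(x)
        return x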
Example #5
    def __init__(self, num_classes=10):
        super(LeNetDygraph, self).__init__()
        self.num_classes = num_classes
        self.features = Sequential(Conv2D(1, 6, 3, stride=1,
                                          padding=1), ReLU(),
                                   paddle.fluid.dygraph.Pool2D(2, 'max', 2),
                                   Conv2D(6, 16, 5, stride=1, padding=0),
                                   ReLU(),
                                   paddle.fluid.dygraph.Pool2D(2, 'max', 2))

        if num_classes > 0:
            self.fc = Sequential(Linear(400, 120), Linear(120, 84),
                                 Linear(84, num_classes))
Example #6
 def __init__(self, in_channel, depth, stride):
     super(bottleneck_IR, self).__init__()
     if in_channel == depth:
         self.shortcut_layer = MaxPool2D(1, stride)
     else:
         self.shortcut_layer = Sequential(
             Conv2D(in_channel, depth, (1, 1), stride, bias_attr=False),
             BatchNorm2D(depth))
     self.res_layer = Sequential(
         BatchNorm2D(in_channel),
         Conv2D(in_channel, depth, (3, 3), (1, 1), 1, bias_attr=False),
         PReLU(depth),
         Conv2D(depth, depth, (3, 3), stride, 1, bias_attr=False),
         BatchNorm2D(depth))
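
The matching forward pass is the standard residual sum (a sketch; it is not part of the listing above):

 def forward(self, x):
     shortcut = self.shortcut_layer(x)
     res = self.res_layer(x)
     return res + shortcut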
Example #7
    def __init__(self, num_classes=10):
        super(LeNetListInput, self).__init__()
        self.num_classes = num_classes
        self.conv = Conv2D(1, 6, 3, stride=1, padding=1)
        # freeze the first conv layer's parameters
        for param in self.conv.parameters():
            param.trainable = False
        self.features = Sequential(self.conv, ReLU(),
                                   paddle.fluid.dygraph.Pool2D(2, 'max', 2),
                                   Conv2D(6, 16, 5, stride=1, padding=0),
                                   ReLU(),
                                   paddle.fluid.dygraph.Pool2D(2, 'max', 2))

        if num_classes > 0:
            self.fc = Sequential(Linear(400, 120), Linear(120, 84),
                                 Linear(84, num_classes))
Example #8
 def __init__(self,
              mask_roi_extractor=None,
              num_convs=0,
              feat_in=2048,
              feat_out=256,
              mask_num_stages=1,
              share_bbox_feat=False):
     super(MaskFeat, self).__init__()
     self.num_convs = num_convs
     self.feat_in = feat_in
     self.feat_out = feat_out
     self.mask_roi_extractor = mask_roi_extractor
     self.mask_num_stages = mask_num_stages
     self.share_bbox_feat = share_bbox_feat
     self.upsample_module = []
     fan_conv = feat_out * 3 * 3
     fan_deconv = feat_out * 2 * 2
     for i in range(self.mask_num_stages):
         name = 'stage_{}'.format(i)
         mask_conv = Sequential()
         for j in range(self.num_convs):
             conv_name = 'mask_inter_feat_{}'.format(j + 1)
             mask_conv.add_sublayer(
                 conv_name,
                 Conv2D(in_channels=feat_in if j == 0 else feat_out,
                        out_channels=feat_out,
                        kernel_size=3,
                        padding=1,
                        weight_attr=ParamAttr(initializer=KaimingNormal(
                            fan_in=fan_conv)),
                        bias_attr=ParamAttr(learning_rate=2.,
                                            regularizer=L2Decay(0.))))
             mask_conv.add_sublayer(conv_name + 'act', ReLU())
         mask_conv.add_sublayer(
             'conv5_mask',
             Conv2DTranspose(
                 # the inter convs above emit feat_out channels, so feat_in
                 # is only the right input width when num_convs == 0
                 in_channels=feat_in if num_convs == 0 else feat_out,
                 out_channels=self.feat_out,
                 kernel_size=2,
                 stride=2,
                 weight_attr=ParamAttr(initializer=KaimingNormal(
                     fan_in=fan_deconv)),
                 bias_attr=ParamAttr(learning_rate=2.,
                                     regularizer=L2Decay(0.))))
         mask_conv.add_sublayer('conv5_mask' + 'act', ReLU())
         upsample = self.add_sublayer(name, mask_conv)
         self.upsample_module.append(upsample)
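
Hedged usage note: in the head's forward pass, each stage's block would be applied to the pooled RoI feature, e.g. (hypothetical, not from the listing):

     # inside MaskFeat.forward, for the current stage index:
     mask_feat = self.upsample_module[stage_idx](roi_feat)  # 2x upsampled by the deconv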
Example #9
    def __init__(self, num_layers, mode='ir', opts=None):
        super(GradualStyleEncoder, self).__init__()
        assert num_layers in [50, 100,
                              152], 'num_layers should be 50, 100, or 152'
        assert mode in ['ir', 'ir_se'], 'mode should be ir or ir_se'
        blocks = get_blocks(num_layers)
        if mode == 'ir':
            unit_module = bottleneck_IR
        elif mode == 'ir_se':
            unit_module = bottleneck_IR_SE
        self.input_layer = Sequential(
            Conv2D(opts.input_nc, 64, (3, 3), 1, 1, bias_attr=False),
            BatchNorm2D(64), PReLU(64))
        modules = []
        for block in blocks:
            for bottleneck in block:
                modules.append(
                    unit_module(bottleneck.in_channel, bottleneck.depth,
                                bottleneck.stride))
        self.body = Sequential(*modules)

        self.styles = nn.LayerList()
        self.style_count = 18
        self.coarse_ind = 3
        self.middle_ind = 7
        for i in range(self.style_count):
            if i < self.coarse_ind:
                style = GradualStyleBlock(512, 512, 16)
            elif i < self.middle_ind:
                style = GradualStyleBlock(512, 512, 32)
            else:
                style = GradualStyleBlock(512, 512, 64)
            self.styles.append(style)
        self.latlayer1 = nn.Conv2D(256,
                                   512,
                                   kernel_size=1,
                                   stride=1,
                                   padding=0)
        self.latlayer2 = nn.Conv2D(128,
                                   512,
                                   kernel_size=1,
                                   stride=1,
                                   padding=0)
Example #10
 def __init__(self, num_channels, bin_size_list):
     super(PSPModule, self).__init__()
     self.bin_size_list = bin_size_list
     num_filters = num_channels // len(bin_size_list)
     # use a LayerList so each branch's parameters are registered
     self.features = LayerList()
     for i in range(len(bin_size_list)):
         self.features.append(
             Sequential(
                 AdaptiveMaxPool2D(self.bin_size_list[i]),
                 Conv2D(in_channels=num_channels,
                        out_channels=num_filters,
                        kernel_size=1),
                 BatchNorm(num_filters, act='relu')))
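
A forward sketch consistent with the channel doubling used by PSPNet above (assumptions: each pooled branch is bilinearly upsampled back to the input's spatial size and concatenated with the input; paddle 2.x interpolate shown for illustration):

 def forward(self, inputs):
     out = [inputs]
     for branch in self.features:
         x = branch(inputs)
         x = paddle.nn.functional.interpolate(x,
                                              size=inputs.shape[2:],
                                              mode='bilinear')
         out.append(x)
     return paddle.concat(out, axis=1)  # num_channels -> num_channels * 2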
Example #11
    def __init__(self, num_classes=10, classifier_activation='softmax'):
        super(ImperativeLenet, self).__init__()
        self.features = Sequential(
            Conv2D(num_channels=1,
                   num_filters=6,
                   filter_size=3,
                   stride=1,
                   padding=1),
            Pool2D(pool_size=2, pool_type='max', pool_stride=2),
            Conv2D(num_channels=6,
                   num_filters=16,
                   filter_size=5,
                   stride=1,
                   padding=0),
            Pool2D(pool_size=2, pool_type='max', pool_stride=2))

        self.fc = Sequential(
            Linear(input_dim=400, output_dim=120),
            Linear(input_dim=120, output_dim=84),
            Linear(input_dim=84,
                   output_dim=num_classes,
                   act=classifier_activation))
Example #12
    def _make_layer(self, block, planes, blocks, stride=1):
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                BatchNorm2D(
                    planes * block.expansion,
                    weight_attr=paddle.ParamAttr(
                        initializer=nn.initializer.Constant(value=0.0)),
                    bias_attr=paddle.ParamAttr(
                        regularizer=paddle.regularizer.L1Decay(0.0))),
            )

        layers = []
        layers.append(
            block(self.inplanes, planes, stride, downsample,
                  self.zero_init_residual))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes,
                                self.zero_init_residual))

        return Sequential(*layers)
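
Hedged usage sketch: a ResNet-50-style layout would call this helper once per stage (names and block counts here are illustrative, not from the listing):

        # e.g. inside ResNet.__init__:
        self.layer1 = self._make_layer(block, 64, blocks=3)
        self.layer2 = self._make_layer(block, 128, blocks=4, stride=2)
        self.layer3 = self._make_layer(block, 256, blocks=6, stride=2)
        self.layer4 = self._make_layer(block, 512, blocks=3, stride=2)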
Example #13
    def __init__(self, num_classes=59, IMG_SIZE=None, backbone='resnet50'):
        super(GIoNet, self).__init__()

        res = ResNet101(pretrained=False)
        # stem: res.conv, res.pool2d_max
        self.layer0 = Sequential(res.conv, res.pool2d_max)
        self.layer1 = res.layer1
        self.layer2 = res.layer2
        self.layer3 = res.layer3
        self.layer4 = res.layer4

        in_channels = 2048
        out_channels = 256
        # gionet channels 2048-> 256 num_nodes = H' *W'
        self.layer5 = Sequential(
            Conv2D(in_channels=in_channels,
                   out_channels=out_channels,
                   kernel_size=1), BatchNorm(out_channels, act='relu'))
        # the feature map is 1/8 of the input, so IMG_SIZE must be given
        num_node = ceil(IMG_SIZE[0] / 8) * ceil(IMG_SIZE[1] / 8)
        self.gcnmodule = GCNModule(num_channels=out_channels,
                                   num_nodes=num_node)
        self.classifier = Conv2D(in_channels=out_channels,
                                 out_channels=num_classes,
                                 kernel_size=1)
Example #14
def SeperableConv2d(in_channels,
                    out_channels,
                    kernel_size=1,
                    stride=1,
                    padding=0):
    """Replace a full Conv2D with a depthwise Conv2D followed by a
    pointwise (1x1) Conv2D.
    """
    return Sequential(
        Conv2D(in_channels=in_channels,
               out_channels=in_channels,
               kernel_size=kernel_size,
               groups=in_channels,
               stride=stride,
               padding=padding),
        ReLU(),
        Conv2D(in_channels=in_channels,
               out_channels=out_channels,
               kernel_size=1),
    )
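
Quick shape check for the helper above (assumes a Paddle 2.x environment):

sep = SeperableConv2d(in_channels=64, out_channels=128,
                      kernel_size=3, stride=2, padding=1)
y = sep(paddle.randn([1, 64, 32, 32]))
print(y.shape)  # [1, 128, 16, 16]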
Example #15
 def __init__(self, layer_num, ch_out, name=None):
     super(ShortCut, self).__init__()
     shortcut_conv = Sequential()
     ch_in = ch_out * 2
     for i in range(layer_num):
         fan_out = 3 * 3 * ch_out
         std = math.sqrt(2. / fan_out)
         in_channels = ch_in if i == 0 else ch_out
         shortcut_name = name + '.conv.{}'.format(i)
         shortcut_conv.add_sublayer(
             shortcut_name,
             Conv2D(in_channels=in_channels,
                    out_channels=ch_out,
                    kernel_size=3,
                    padding=1,
                    weight_attr=ParamAttr(initializer=Normal(0, std)),
                    bias_attr=ParamAttr(learning_rate=2.,
                                        regularizer=L2Decay(0.))))
         if i < layer_num - 1:
             shortcut_conv.add_sublayer(shortcut_name + '.act', ReLU())
     self.shortcut = self.add_sublayer('short', shortcut_conv)
Example #16
def create_Mb_Tiny_RFB_fd(num_classes, is_test=False, device="cuda"):
    base_net = Mb_Tiny_RFB(2)
    base_net_model = base_net.model  # disable dropout layer

    source_layer_indexes = [8, 11, 13]
    extras = LayerList([
        Sequential(
            Conv2D(in_channels=base_net.base_channel * 16,
                   out_channels=base_net.base_channel * 4,
                   kernel_size=1), ReLU(),
            SeperableConv2d(in_channels=base_net.base_channel * 4,
                            out_channels=base_net.base_channel * 16,
                            kernel_size=3,
                            stride=2,
                            padding=1), ReLU())
    ])

    regression_headers = LayerList([
        SeperableConv2d(in_channels=base_net.base_channel * 4,
                        out_channels=3 * 4,
                        kernel_size=3,
                        padding=1),
        SeperableConv2d(in_channels=base_net.base_channel * 8,
                        out_channels=2 * 4,
                        kernel_size=3,
                        padding=1),
        SeperableConv2d(in_channels=base_net.base_channel * 16,
                        out_channels=2 * 4,
                        kernel_size=3,
                        padding=1),
        Conv2D(in_channels=base_net.base_channel * 16,
               out_channels=3 * 4,
               kernel_size=3,
               padding=1)
    ])

    classification_headers = LayerList([
        SeperableConv2d(in_channels=base_net.base_channel * 4,
                        out_channels=3 * num_classes,
                        kernel_size=3,
                        padding=1),
        SeperableConv2d(in_channels=base_net.base_channel * 8,
                        out_channels=2 * num_classes,
                        kernel_size=3,
                        padding=1),
        SeperableConv2d(in_channels=base_net.base_channel * 16,
                        out_channels=2 * num_classes,
                        kernel_size=3,
                        padding=1),
        Conv2D(in_channels=base_net.base_channel * 16,
               out_channels=3 * num_classes,
               kernel_size=3,
               padding=1)
    ])

    return SSD(num_classes,
               base_net_model,
               source_layer_indexes,
               extras,
               classification_headers,
               regression_headers,
               is_test=is_test,
               config=config,
               device=device)
Example #17
 def __init__(self, c, num_block, groups, kernel=(3, 3), stride=(1, 1),
              padding=(1, 1)):
     super(Residual, self).__init__()
     modules = []
     for _ in range(num_block):
         modules.append(
             DepthWise(c, c, residual=True, kernel=kernel, padding=padding,
                       stride=stride, groups=groups))
     self.model = Sequential(*modules)
Example #18
    def __init__(self, layers=50, num_classes=1000):
        super(ResNet, self).__init__()
        self.layers = layers
        supported_layers = [18, 34, 50, 101, 152]
        assert layers in supported_layers, \
            'layers should be one of {}'.format(supported_layers)

        if layers == 18:
            depth = [2, 2, 2, 2]
        elif layers == 34:
            depth = [3, 4, 6, 3]
        elif layers == 50:
            depth = [3, 4, 6, 3]
        elif layers == 101:
            depth = [3, 4, 23, 3]
        elif layers == 152:
            depth = [3, 8, 36, 3]

        if layers < 50:
            num_channels = [64, 64, 128, 256]
        else:
            num_channels = [64, 256, 512, 1024]

        num_filters = [64, 128, 256, 512]

        self.conv = ConvBNLayer(num_channels=3,
                                num_filters=64,
                                filter_size=7,
                                stride=2,
                                act='relu')
        self.pool2d_max = MaxPool2D(kernel_size=3, stride=2, padding=1)
        if layers < 50:
            block = BasicBlock
            l1_shortcut = True
        else:
            block = BottleneckBlock
            l1_shortcut = False

        self.layer1 = Sequential(*self.make_layer(block,
                                                  num_channels[0],
                                                  num_filters[0],
                                                  depth[0],
                                                  stride=1,
                                                  shortcut=l1_shortcut,
                                                  name='layer1'))
        self.layer2 = Sequential(*self.make_layer(block,
                                                  num_channels[1],
                                                  num_filters[1],
                                                  depth[1],
                                                  stride=2,
                                                  name='layer2'))
        self.layer3 = Sequential(*self.make_layer(block,
                                                  num_channels[2],
                                                  num_filters[2],
                                                  depth[2],
                                                  stride=1,
                                                  name='layer3',
                                                  dilation=2))
        self.layer4 = Sequential(*self.make_layer(block,
                                                  num_channels[3],
                                                  num_filters[3],
                                                  depth[3],
                                                  stride=1,
                                                  name='layer4',
                                                  dilation=4))
        # global average pooling; a fixed 7x7 kernel would only match a 7x7
        # map, and the dilated layers above keep the feature map larger
        self.last_pool = AdaptiveAvgPool2D(output_size=1)
        self.fc = Linear(in_features=num_filters[-1] * block.expansion,
                         out_features=num_classes)

        self.out_dim = num_filters[-1] * block.expansion
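
A forward sketch consistent with the dilated layout above (layer3/layer4 keep the feature map at 1/8 resolution; the flatten before fc is an assumption, since the listing ends at __init__):

    def forward(self, inputs):
        x = self.conv(inputs)
        x = self.pool2d_max(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.last_pool(x)                # -> [N, 2048, 1, 1]
        x = paddle.flatten(x, start_axis=1)  # -> [N, 2048]
        return self.fc(x)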