Example 1
    def __init__(self, in_dim=256, mlp_dim=1024, resolution=7, num_stages=1):
        super(TwoFCHead, self).__init__()
        self.in_dim = in_dim
        self.mlp_dim = mlp_dim
        self.num_stages = num_stages
        fan = in_dim * resolution * resolution
        self.fc6_list = []
        self.fc6_relu_list = []
        self.fc7_list = []
        self.fc7_relu_list = []
        for stage in range(num_stages):
            fc6_name = 'fc6_{}'.format(stage)
            fc7_name = 'fc7_{}'.format(stage)
            fc6 = self.add_sublayer(
                fc6_name,
                nn.Linear(in_dim * resolution * resolution,
                          mlp_dim,
                          weight_attr=ParamAttr(initializer=XavierUniform(
                              fan_out=fan)),
                          bias_attr=ParamAttr(learning_rate=2.,
                                              regularizer=L2Decay(0.))))
            fc6_relu = self.add_sublayer(fc6_name + 'act', ReLU())
            fc7 = self.add_sublayer(
                fc7_name,
                nn.Linear(mlp_dim,
                          mlp_dim,
                          weight_attr=ParamAttr(initializer=XavierUniform()),
                          bias_attr=ParamAttr(learning_rate=2.,
                                              regularizer=L2Decay(0.))))
            fc7_relu = self.add_sublayer(fc7_name + 'act', ReLU())
            self.fc6_list.append(fc6)
            self.fc6_relu_list.append(fc6_relu)
            self.fc7_list.append(fc7)
            self.fc7_relu_list.append(fc7_relu)
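
Every snippet on this page wires paddle.nn.ReLU into a larger model. As a quick reference, a minimal sketch (assuming Paddle 2.x) of what the layer itself computes:

import paddle
from paddle.nn import ReLU

relu = ReLU()
x = paddle.to_tensor([-1.0, 0.0, 2.5])
print(relu(x).numpy())  # elementwise max(0, x) -> [0.  0.  2.5]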
Example 2
    def __init__(self, num_classes=10, classifier_activation='softmax'):
        super(LeNet, self).__init__()
        self.num_classes = num_classes
        self.features = Sequential(Conv2d(1, 6, 3, stride=1,
                                          padding=1), ReLU(),
                                   Pool2D(2, 'max', 2),
                                   Conv2d(6, 16, 5, stride=1, padding=0),
                                   ReLU(), Pool2D(2, 'max', 2))

        if num_classes > 0:
            # Use num_classes for the output layer instead of a hardcoded 10
            self.fc = Sequential(Linear(400, 120), Linear(120, 84),
                                 Linear(84, num_classes),
                                 Softmax())  # TODO: accept any activation
Example 3
    def __init__(self, num_classes=10):
        super(LeNetDygraph, self).__init__()
        self.num_classes = num_classes
        self.features = Sequential(Conv2D(1, 6, 3, stride=1,
                                          padding=1), ReLU(),
                                   paddle.fluid.dygraph.Pool2D(2, 'max', 2),
                                   Conv2D(6, 16, 5, stride=1, padding=0),
                                   ReLU(),
                                   paddle.fluid.dygraph.Pool2D(2, 'max', 2))

        if num_classes > 0:
            self.fc = Sequential(Linear(400, 120), Linear(120, 84),
                                 Linear(84, num_classes))
Example 4
    def __init__(self, num_classes=10):
        super(LeNetListInput, self).__init__()
        self.num_classes = num_classes
        self.cov = Conv2D(1, 6, 3, stride=1, padding=1)
        for param in self.cov.parameters():
            param.trainable = False
        self.features = Sequential(self.cov, ReLU(),
                                   paddle.fluid.dygraph.Pool2D(2, 'max', 2),
                                   Conv2D(6, 16, 5, stride=1, padding=0),
                                   ReLU(),
                                   paddle.fluid.dygraph.Pool2D(2, 'max', 2))

        if num_classes > 0:
            self.fc = Sequential(Linear(400, 120), Linear(120, 84),
                                 Linear(84, num_classes))
Example 5
    def __init__(self,
                 num_channels,
                 num_filters,
                 stride=1,
                 shortcut=True,
                 name=None):
        super(BasicBlock, self).__init__(name)

        self.conv0 = ConvBNLayer(num_channels=num_channels,
                                 num_filters=num_filters,
                                 filter_size=3,
                                 stride=stride,
                                 act='relu',
                                 name=name)
        self.conv1 = ConvBNLayer(num_channels=num_filters,
                                 num_filters=num_filters,
                                 filter_size=3,
                                 act=None,
                                 name=name)
        if not shortcut:
            self.short = ConvBNLayer(num_channels=num_channels,
                                     num_filters=num_filters,
                                     filter_size=1,
                                     stride=stride,
                                     act=None,
                                     name=name)
        self.shortcut = shortcut

        self.relu = ReLU()
Example 6

    def __init__(self,
                 inplanes,
                 planes,
                 stride=1,
                 downsample=None,
                 zero_init_residual=True):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = BatchNorm2D(
            planes,
            weight_attr=paddle.ParamAttr(
                regularizer=paddle.regularizer.L1Decay(0.0)),
            bias_attr=paddle.ParamAttr(
                regularizer=paddle.regularizer.L1Decay(0.0)))
        self.relu = ReLU()
        self.conv2 = conv3x3(planes, planes)
        if zero_init_residual:
            # Zero-init the last BN scale so the block starts as an identity map
            self.bn2 = BatchNorm2D(
                planes,
                weight_attr=paddle.ParamAttr(
                    initializer=nn.initializer.Constant(value=0.0),
                    regularizer=paddle.regularizer.L1Decay(0.0)),
                bias_attr=paddle.ParamAttr(
                    regularizer=paddle.regularizer.L1Decay(0.0)))
        else:
            self.bn2 = BatchNorm2D(
                planes,
                weight_attr=paddle.ParamAttr(
                    regularizer=paddle.regularizer.L1Decay(0.0)),
                bias_attr=paddle.ParamAttr(
                    regularizer=paddle.regularizer.L1Decay(0.0)))
        self.downsample = downsample
        self.stride = stride
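
The snippet stops before the forward pass; a plausible sketch of the standard residual forward for this BasicBlock (an assumption, not part of the original source):

    def forward(self, x):
        identity = x
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        if self.downsample is not None:
            identity = self.downsample(x)
        # Residual connection, then the final activation
        return self.relu(out + identity)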
Example 7
def make_layers(cfg, batch_norm=False):
    layers = []
    in_channels = 3

    for v in cfg:
        if v == 'M':
            layers += [Pool2D(pool_size=2, pool_stride=2)]
        else:
            if batch_norm:
                conv2d = Conv2d(in_channels, v, kernel_size=3, padding=1)
                layers += [conv2d, BatchNorm(v), ReLU()]
            else:
                conv2d = Conv2d(in_channels, v, kernel_size=3, padding=1)
                layers += [conv2d, ReLU()]
            in_channels = v
    return Sequential(*layers)
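
make_layers consumes a VGG-style cfg list in which integers are output channel counts and 'M' inserts a pooling layer. An illustrative call (the cfg values here are invented for the example):

vgg_cfg = [64, 64, 'M', 128, 128, 'M']
features = make_layers(vgg_cfg, batch_norm=True)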
Example 8
    def __init__(self,
                 in_channels,
                 out_channels,
                 filter_size,
                 pool_size,
                 pool_stride,
                 pool_padding=0,
                 conv_stride=1,
                 conv_padding=0,
                 conv_dilation=1,
                 conv_groups=1,
                 weight_attr=None,
                 bias_attr=None):
        super(SimpleImgConvPool, self).__init__()

        # Initialize the Conv2D layer
        self._conv2d = Conv2D(in_channels=in_channels,
                              out_channels=out_channels,
                              kernel_size=filter_size,
                              stride=conv_stride,
                              padding=conv_padding,
                              dilation=conv_dilation,
                              groups=conv_groups,
                              weight_attr=weight_attr,
                              bias_attr=bias_attr)

        # Initialize the ReLU activation
        self._relu = ReLU()

        # Initialize the MaxPool2D layer
        self._pool2d = MaxPool2D(kernel_size=pool_size,
                                 stride=pool_stride,
                                 padding=pool_padding)
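
The forward pass is not shown; given the layer names and the comments above, a plausible sketch would chain convolution, activation, and pooling:

    def forward(self, inputs):
        x = self._conv2d(inputs)
        x = self._relu(x)
        return self._pool2d(x)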
Example 9
    def __init__(self,
                 input_channels,
                 output_channels,
                 filter_size,
                 stride,
                 padding,
                 stdv,
                 groups=1,
                 act=None,
                 name=None):
        super(ConvPoolLayer, self).__init__()

        self.relu = ReLU() if act == "relu" else None

        self._conv = Conv2D(
            in_channels=input_channels,
            out_channels=output_channels,
            kernel_size=filter_size,
            stride=stride,
            padding=padding,
            groups=groups,
            weight_attr=ParamAttr(
                name=name + "_weights", initializer=Uniform(-stdv, stdv)),
            bias_attr=ParamAttr(
                name=name + "_offset", initializer=Uniform(-stdv, stdv)))
        self._pool = MaxPool2D(kernel_size=3, stride=2, padding=0)
Example 10

    def __init__(self,
                 mask_roi_extractor=None,
                 num_convs=0,
                 feat_in=2048,
                 feat_out=256,
                 mask_num_stages=1,
                 share_bbox_feat=False):
        super(MaskFeat, self).__init__()
        self.num_convs = num_convs
        self.feat_in = feat_in
        self.feat_out = feat_out
        self.mask_roi_extractor = mask_roi_extractor
        self.mask_num_stages = mask_num_stages
        self.share_bbox_feat = share_bbox_feat
        self.upsample_module = []
        fan_conv = feat_out * 3 * 3
        fan_deconv = feat_out * 2 * 2
        for i in range(self.mask_num_stages):
            name = 'stage_{}'.format(i)
            mask_conv = Sequential()
            for j in range(self.num_convs):
                conv_name = 'mask_inter_feat_{}'.format(j + 1)
                mask_conv.add_sublayer(
                    conv_name,
                    Conv2D(in_channels=feat_in if j == 0 else feat_out,
                           out_channels=feat_out,
                           kernel_size=3,
                           padding=1,
                           weight_attr=ParamAttr(initializer=KaimingNormal(
                               fan_in=fan_conv)),
                           bias_attr=ParamAttr(learning_rate=2.,
                                               regularizer=L2Decay(0.))))
                mask_conv.add_sublayer(conv_name + 'act', ReLU())
            mask_conv.add_sublayer(
                'conv5_mask',
                Conv2DTranspose(
                    # The deconv input has feat_out channels once any
                    # intermediate convs ran; feat_in only when num_convs == 0
                    in_channels=feat_in if num_convs == 0 else feat_out,
                    out_channels=self.feat_out,
                    kernel_size=2,
                    stride=2,
                    weight_attr=ParamAttr(initializer=KaimingNormal(
                        fan_in=fan_deconv)),
                    bias_attr=ParamAttr(learning_rate=2.,
                                        regularizer=L2Decay(0.))))
            mask_conv.add_sublayer('conv5_mask' + 'act', ReLU())
            upsample = self.add_sublayer(name, mask_conv)
            self.upsample_module.append(upsample)
Example 11
    def __init__(self):
        super(Model, self).__init__()
        with supernet(kernel_size=(3, 5, 7),
                      expand_ratio=[1, 2, 4]) as ofa_super:
            models = []
            models += [nn.Conv2D(1, 6, 3)]
            models += [ReLU()]
            models += [nn.Pool2D(2, 'max', 2)]
            models += [nn.Conv2D(6, 16, 5, padding=0)]
            models += [ReLU()]
            models += [nn.Pool2D(2, 'max', 2)]
            models += [
                nn.Linear(784, 120),
                nn.Linear(120, 84),
                nn.Linear(84, 10)
            ]
            models = ofa_super.convert(models)
        self.models = paddle.nn.Sequential(*models)
Example 12
    def __init__(self, num_classes=10):
        super(ImperativeLenetWithSkipQuant, self).__init__()

        conv2d_w1_attr = fluid.ParamAttr(name="conv2d_w_1")
        conv2d_w2_attr = fluid.ParamAttr(name="conv2d_w_2")
        fc_w1_attr = fluid.ParamAttr(name="fc_w_1")
        fc_w2_attr = fluid.ParamAttr(name="fc_w_2")
        fc_w3_attr = fluid.ParamAttr(name="fc_w_3")
        conv2d_b1_attr = fluid.ParamAttr(name="conv2d_b_1")
        conv2d_b2_attr = fluid.ParamAttr(name="conv2d_b_2")
        fc_b1_attr = fluid.ParamAttr(name="fc_b_1")
        fc_b2_attr = fluid.ParamAttr(name="fc_b_2")
        fc_b3_attr = fluid.ParamAttr(name="fc_b_3")
        self.conv2d_0 = Conv2D(in_channels=1,
                               out_channels=6,
                               kernel_size=3,
                               stride=1,
                               padding=1,
                               weight_attr=conv2d_w1_attr,
                               bias_attr=conv2d_b1_attr)
        self.conv2d_0.skip_quant = True

        self.batch_norm_0 = BatchNorm2D(6)
        self.relu_0 = ReLU()
        self.pool2d_0 = MaxPool2D(kernel_size=2, stride=2)
        self.conv2d_1 = Conv2D(in_channels=6,
                               out_channels=16,
                               kernel_size=5,
                               stride=1,
                               padding=0,
                               weight_attr=conv2d_w2_attr,
                               bias_attr=conv2d_b2_attr)
        self.conv2d_1.skip_quant = False

        self.batch_norm_1 = BatchNorm2D(16)
        self.relu6_0 = ReLU6()
        self.pool2d_1 = MaxPool2D(kernel_size=2, stride=2)
        self.linear_0 = Linear(in_features=400,
                               out_features=120,
                               weight_attr=fc_w1_attr,
                               bias_attr=fc_b1_attr)
        self.linear_0.skip_quant = True

        self.leaky_relu_0 = LeakyReLU()
        self.linear_1 = Linear(in_features=120,
                               out_features=84,
                               weight_attr=fc_w2_attr,
                               bias_attr=fc_b2_attr)
        self.linear_1.skip_quant = False

        self.sigmoid_0 = Sigmoid()
        self.linear_2 = Linear(in_features=84,
                               out_features=num_classes,
                               weight_attr=fc_w3_attr,
                               bias_attr=fc_b3_attr)
        self.linear_2.skip_quant = False
        self.softmax_0 = Softmax()
Example 13
    def __init__(self):
        super(ModelConv, self).__init__()
        with supernet(kernel_size=(3, 5, 7),
                      channel=((4, 8, 12), (8, 12, 16), (8, 12, 16),
                               (8, 12, 16))) as ofa_super:
            models = []
            models += [nn.Conv2D(3, 4, 3, padding=1)]
            models += [nn.InstanceNorm2D(4)]
            models += [ReLU()]
            models += [nn.Conv2D(4, 4, 3, groups=4)]
            models += [nn.InstanceNorm2D(4)]
            models += [ReLU()]
            models += [nn.Conv2DTranspose(4, 4, 3, groups=4, padding=1)]
            models += [nn.BatchNorm2D(4)]
            models += [ReLU()]
            models += [nn.Conv2D(4, 3, 3)]
            models += [ReLU()]
            models = ofa_super.convert(models)

        models += [
            Block(SuperSeparableConv2D(3,
                                       6,
                                       1,
                                       padding=1,
                                       candidate_config={'channel': (3, 6)}),
                  fixed=True)
        ]
        with supernet(kernel_size=(3, 5, 7),
                      expand_ratio=(1, 2, 4)) as ofa_super:
            models1 = []
            models1 += [nn.Conv2D(6, 4, 3)]
            models1 += [nn.BatchNorm2D(4)]
            models1 += [ReLU()]
            models1 += [nn.Conv2D(4, 4, 3, groups=2)]
            models1 += [nn.InstanceNorm2D(4)]
            models1 += [ReLU()]
            models1 += [nn.Conv2DTranspose(4, 4, 3, groups=2)]
            models1 += [nn.BatchNorm2D(4)]
            models1 += [ReLU()]
            models1 += [nn.Conv2DTranspose(4, 4, 3)]
            models1 += [nn.BatchNorm2D(4)]
            models1 += [ReLU()]
            models1 += [nn.Conv2DTranspose(4, 4, 1)]
            models1 += [nn.BatchNorm2D(4)]
            models1 += [ReLU()]
            models1 = ofa_super.convert(models1)

        models += models1
        self.models = paddle.nn.Sequential(*models)
Example 14
    def __init__(self):
        super(ModelCase6, self).__init__()
        self.bn1 = BatchNorm2D(3)
        self.relu1 = ReLU()
        self.fc1 = paddle.nn.Linear(3 * 16 * 16, 3 * 16 * 16)
        self.dp = paddle.nn.Dropout(p=0.5)
        self.lstm = paddle.nn.LSTM(1536,
                                   10,
                                   direction='bidirectional',
                                   num_layers=2)
Example 15

    def __init__(self, channels, reduction):
        super(SEModule, self).__init__()
        self.avg_pool = AdaptiveAvgPool2D(1)
        self.fc1 = Conv2D(channels,
                          channels // reduction,
                          kernel_size=1,
                          padding=0,
                          bias_attr=False)
        self.relu = ReLU()
        self.fc2 = Conv2D(channels // reduction,
                          channels,
                          kernel_size=1,
                          padding=0,
                          bias_attr=False)
        self.sigmoid = Sigmoid()
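
SEModule above omits its forward; a sketch of the usual squeeze-and-excitation pattern it implies (assumed, not from the original snippet):

    def forward(self, x):
        w = self.avg_pool(x)           # squeeze: [N, C, H, W] -> [N, C, 1, 1]
        w = self.relu(self.fc1(w))     # excitation bottleneck (C // reduction)
        w = self.sigmoid(self.fc2(w))  # per-channel gates in (0, 1)
        return x * w                   # rescale the input channels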
Example 16

    def __init__(self, input_size, block, layers, zero_init_residual=True):
        super(ResNet, self).__init__()
        assert input_size[0] in [
            112, 224
        ], "input_size should be [112, 112] or [224, 224]"
        self.inplanes = 64
        self.zero_init_residual = zero_init_residual
        self.conv1 = Conv2D(3,
                            64,
                            kernel_size=7,
                            stride=2,
                            padding=3,
                            weight_attr=paddle.ParamAttr(
                                initializer=nn.initializer.KaimingNormal()))
        self.bn1 = BatchNorm2D(
            64,
            weight_attr=paddle.ParamAttr(
                initializer=nn.initializer.Constant(value=1),
                regularizer=paddle.regularizer.L1Decay(0.0)),
            bias_attr=paddle.ParamAttr(
                initializer=nn.initializer.Constant(value=0),
                regularizer=paddle.regularizer.L1Decay(0.0)))
        self.relu = ReLU()
        self.maxpool = MaxPool2D(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)

        self.bn_o1 = BatchNorm2D(
            2048,
            weight_attr=paddle.ParamAttr(
                initializer=nn.initializer.Constant(value=0.0),
                regularizer=paddle.regularizer.L1Decay(0.0)),
            bias_attr=paddle.ParamAttr(
                regularizer=paddle.regularizer.L1Decay(0.0)))
        self.dropout = Dropout()
        if input_size[0] == 112:
            self.fc = Linear(2048 * 4 * 4, 512)
        else:
            self.fc = Linear(2048 * 8 * 8, 512)
        self.bn_o2 = BatchNorm1D(
            512,
            weight_attr=paddle.ParamAttr(
                initializer=nn.initializer.Constant(value=0.0),
                regularizer=paddle.regularizer.L1Decay(0.0)),
            bias_attr=paddle.ParamAttr(
                regularizer=paddle.regularizer.L1Decay(0.0)))
Example 17
    def __init__(self, mode="train"):
        ClasModel = paddle.nn.Sequential(
            Conv2D(3, 6, (3, 3)),
            BatchNorm2D(6),
            ReLU(),
            Conv2D(6, 6, (3, 3)),
            BatchNorm2D(6),
            ReLU(),
            MaxPool2D((2, 2)),
            Conv2D(6, 12, (3, 3)),
            BatchNorm2D(12),
            ReLU(),
            Conv2D(12, 12, (3, 3)),
            BatchNorm2D(12),
            ReLU(),
            MaxPool2D((2, 2)),
            Conv2D(12, 8, (3, 3)),
            BatchNorm2D(8),
            ReLU(),
            Conv2D(8, 8, (3, 3)),
            BatchNorm2D(8),
            ReLU(),
            MaxPool2D((2, 2)),
            Flatten(),
            Linear(128, 128),
            ReLU(),
            Linear(128, 32),
            ReLU(),
            Linear(32, 2),
            Softmax(),
        )
        input = InputSpec([None, 3, 64, 64], "float32", "x")
        label = InputSpec([None, 1], "int32", "label")
        model = paddle.Model(ClasModel, inputs=input, labels=label)
        model.prepare(
            paddle.optimizer.Adam(parameters=ClasModel.parameters()),
            paddle.nn.CrossEntropyLoss(),
            paddle.metric.Accuracy(),
        )
        self.model = model

        if mode == "predict":
            self.load_weight()
Example 18
    def __init__(self, num_classes=10):
        super(ImperativeLenet, self).__init__()
        conv2d_w1_attr = fluid.ParamAttr(name="conv2d_w_1")
        conv2d_w2_attr = fluid.ParamAttr(name="conv2d_w_2")
        fc_w1_attr = fluid.ParamAttr(name="fc_w_1")
        fc_w2_attr = fluid.ParamAttr(name="fc_w_2")
        fc_w3_attr = fluid.ParamAttr(name="fc_w_3")
        conv2d_b2_attr = fluid.ParamAttr(name="conv2d_b_2")
        fc_b1_attr = fluid.ParamAttr(name="fc_b_1")
        fc_b2_attr = fluid.ParamAttr(name="fc_b_2")
        fc_b3_attr = fluid.ParamAttr(name="fc_b_3")
        self.features = Sequential(
            Conv2D(in_channels=1,
                   out_channels=6,
                   kernel_size=3,
                   stride=1,
                   padding=1,
                   weight_attr=conv2d_w1_attr,
                   bias_attr=False), BatchNorm2D(6), ReLU(),
            MaxPool2D(kernel_size=2, stride=2),
            Conv2D(in_channels=6,
                   out_channels=16,
                   kernel_size=5,
                   stride=1,
                   padding=0,
                   weight_attr=conv2d_w2_attr,
                   bias_attr=conv2d_b2_attr), BatchNorm2D(16), PReLU(),
            MaxPool2D(kernel_size=2, stride=2))

        self.fc = Sequential(
            Linear(in_features=400,
                   out_features=120,
                   weight_attr=fc_w1_attr,
                   bias_attr=fc_b1_attr), LeakyReLU(),
            Linear(in_features=120,
                   out_features=84,
                   weight_attr=fc_w2_attr,
                   bias_attr=fc_b2_attr), Sigmoid(),
            Linear(in_features=84,
                   out_features=num_classes,
                   weight_attr=fc_w3_attr,
                   bias_attr=fc_b3_attr), Softmax())
        self.add = paddle.nn.quant.add()
        self.quant_stub = paddle.nn.quant.QuantStub()
Example 19
    def __init__(self):
        super(ModelConv2, self).__init__()
        with supernet(expand_ratio=(1, 2, 4)) as ofa_super:
            models = []
            models += [
                nn.Conv2DTranspose(
                    4, 4, 3, weight_attr=paddle.ParamAttr(name='conv1_w'))
            ]
            models += [
                nn.BatchNorm2D(
                    4,
                    weight_attr=paddle.ParamAttr(name='bn1_w'),
                    bias_attr=paddle.ParamAttr(name='bn1_b'))
            ]
            models += [ReLU()]
            models += [nn.Conv2D(4, 4, 3)]
            models += [nn.BatchNorm2D(4)]
            models += [ReLU()]
            models = ofa_super.convert(models)

        with supernet(channel=((4, 6, 8), (4, 6, 8))) as ofa_super:
            models1 = []
            models1 += [nn.Conv2DTranspose(4, 4, 3)]
            models1 += [nn.BatchNorm2D(4)]
            models1 += [ReLU()]
            models1 += [nn.Conv2DTranspose(4, 4, 3)]
            models1 += [nn.BatchNorm2D(4)]
            models1 += [ReLU()]
            models1 = ofa_super.convert(models1)
        models += models1

        with supernet(kernel_size=(3, 5, 7)) as ofa_super:
            models2 = []
            models2 += [nn.Conv2D(4, 4, 3)]
            models2 += [nn.BatchNorm2D(4)]
            models2 += [ReLU()]
            models2 += [nn.Conv2DTranspose(4, 4, 3)]
            models2 += [nn.BatchNorm2D(4)]
            models2 += [ReLU()]
            models2 += [nn.Conv2D(4, 4, 3)]
            models2 += [nn.BatchNorm2D(4)]
            models2 += [ReLU()]
            models2 = ofa_super.convert(models2)

        models += models2
        self.models = paddle.nn.Sequential(*models)
Example 20

def SeperableConv2d(in_channels,
                    out_channels,
                    kernel_size=1,
                    stride=1,
                    padding=0):
    """Replace a plain Conv2d with a depthwise Conv2d followed by a
    pointwise Conv2d."""
    return Sequential(
        Conv2D(in_channels=in_channels,
               out_channels=in_channels,
               kernel_size=kernel_size,
               groups=in_channels,
               stride=stride,
               padding=padding),
        ReLU(),
        Conv2D(in_channels=in_channels,
               out_channels=out_channels,
               kernel_size=1),
    )
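
An illustrative use of SeperableConv2d (the shapes are chosen for the example, not taken from the original source): a depthwise 3x3 convolution over 32 channels followed by a 1x1 pointwise projection to 64 channels.

import paddle

block = SeperableConv2d(in_channels=32, out_channels=64,
                        kernel_size=3, padding=1)
y = block(paddle.randn([1, 32, 56, 56]))  # -> shape [1, 64, 56, 56]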
Example 21
    def __init__(self,
                 num_channels,
                 filter_size,
                 num_filters,
                 stride,
                 padding,
                 num_groups=1):
        super().__init__()

        self.conv = Conv2D(in_channels=num_channels,
                           out_channels=num_filters,
                           kernel_size=filter_size,
                           stride=stride,
                           padding=padding,
                           groups=num_groups,
                           weight_attr=ParamAttr(initializer=KaimingNormal()),
                           bias_attr=False)
        self.bn = BatchNorm(num_filters)
        self.relu = ReLU()
Example 22
    def __init__(self, in_channels, out_channels):
        super(Decoder, self).__init__()

        self.relus = LayerList([ReLU() for i in range(2)])
        self.conv_transpose_01 = Conv2DTranspose(in_channels,
                                                 out_channels,
                                                 kernel_size=3,
                                                 padding=1)
        self.conv_transpose_02 = Conv2DTranspose(out_channels,
                                                 out_channels,
                                                 kernel_size=3,
                                                 padding=1)
        self.bns = LayerList([BatchNorm2D(out_channels) for i in range(2)])
        self.upsamples = LayerList(
            [Upsample(scale_factor=2.0) for i in range(2)])
        self.residual_conv = Conv2D(in_channels,
                                    out_channels,
                                    kernel_size=1,
                                    padding='same')
Example 23
    def __init__(self, in_channels, out_channels):
        super(Encoder, self).__init__()

        self.relus = LayerList([ReLU() for i in range(2)])
        self.separable_conv_01 = SeparableConv2D(in_channels,
                                                 out_channels,
                                                 kernel_size=3,
                                                 padding='same')
        self.bns = LayerList([BatchNorm2D(out_channels) for i in range(2)])

        self.separable_conv_02 = SeparableConv2D(out_channels,
                                                 out_channels,
                                                 kernel_size=3,
                                                 padding='same')
        self.pool = MaxPool2D(kernel_size=3, stride=2, padding=1)
        self.residual_conv = Conv2D(in_channels,
                                    out_channels,
                                    kernel_size=1,
                                    stride=2,
                                    padding='same')
Example 24
    def __init__(self, layer_num, ch_out, name=None):
        super(ShortCut, self).__init__()
        shortcut_conv = Sequential()
        ch_in = ch_out * 2
        for i in range(layer_num):
            fan_out = 3 * 3 * ch_out
            std = math.sqrt(2. / fan_out)
            in_channels = ch_in if i == 0 else ch_out
            shortcut_name = name + '.conv.{}'.format(i)
            shortcut_conv.add_sublayer(
                shortcut_name,
                Conv2D(in_channels=in_channels,
                       out_channels=ch_out,
                       kernel_size=3,
                       padding=1,
                       weight_attr=ParamAttr(initializer=Normal(0, std)),
                       bias_attr=ParamAttr(learning_rate=2.,
                                           regularizer=L2Decay(0.))))
            if i < layer_num - 1:
                shortcut_conv.add_sublayer(shortcut_name + '.act', ReLU())
        self.shortcut = self.add_sublayer('short', shortcut_conv)
Example 25
    def __init__(self,
                 num_channels,
                 num_filters,
                 stride=1,
                 shortcut=True,
                 dilation=1,
                 padding=None,
                 name=None):
        super(BottleneckBlock, self).__init__(name)

        self.conv0 = ConvBNLayer(num_channels=num_channels,
                                 num_filters=num_filters,
                                 filter_size=1,
                                 act='relu')
        self.conv1 = ConvBNLayer(num_channels=num_filters,
                                 num_filters=num_filters,
                                 filter_size=3,
                                 stride=stride,
                                 padding=padding,
                                 act='relu',
                                 dilation=dilation)
        self.conv2 = ConvBNLayer(num_channels=num_filters,
                                 num_filters=num_filters * 4,
                                 filter_size=1,
                                 stride=1)
        if not shortcut:
            self.short = ConvBNLayer(num_channels=num_channels,
                                     num_filters=num_filters * 4,
                                     filter_size=1,
                                     stride=stride)
        self.shortcut = shortcut
        self.num_channel_out = num_filters * 4
        self.relu = ReLU()
Example 26
        self.data = data
        self.transform = vt.Compose([vt.ToTensor()])

    def __getitem__(self, index):
        data = cv2.imread(osp.join(self.data_path, self.data[index][0]))
        data = self.transform(data)
        label = self.data[index][1]
        return data, label

    def __len__(self):
        return len(self.data)


from paddle.nn import Conv2D, BatchNorm2D, ReLU, Softmax, MaxPool2D, Flatten, Linear

ClasModel = paddle.nn.Sequential(
    Conv2D(3, 6, (3, 3)), BatchNorm2D(6), ReLU(),
    Conv2D(6, 6, (3, 3)), BatchNorm2D(6), ReLU(),
    MaxPool2D((2, 2)),
    Conv2D(6, 12, (3, 3)), BatchNorm2D(12), ReLU(),
    Conv2D(12, 12, (3, 3)), BatchNorm2D(12), ReLU(),
    MaxPool2D((2, 2)),
    Conv2D(12, 8, (3, 3)), BatchNorm2D(8), ReLU(),
    Conv2D(8, 8, (3, 3)), BatchNorm2D(8), ReLU(),
    MaxPool2D((2, 2)),
    Flatten(), Linear(128, 128), ReLU(),
    Linear(128, 32), ReLU(), Linear(32, 2),
    Softmax())

train_dataset = HumanClasDataset(mode="train")
eval_dataset = HumanClasDataset(mode="eval")
Example 27

def create_Mb_Tiny_RFB_fd(num_classes, is_test=False, device="cuda"):
    base_net = Mb_Tiny_RFB(2)
    base_net_model = base_net.model  # disable dropout layer

    source_layer_indexes = [8, 11, 13]
    extras = LayerList([
        Sequential(
            Conv2D(in_channels=base_net.base_channel * 16,
                   out_channels=base_net.base_channel * 4,
                   kernel_size=1), ReLU(),
            SeperableConv2d(in_channels=base_net.base_channel * 4,
                            out_channels=base_net.base_channel * 16,
                            kernel_size=3,
                            stride=2,
                            padding=1), ReLU())
    ])

    regression_headers = LayerList([
        SeperableConv2d(in_channels=base_net.base_channel * 4,
                        out_channels=3 * 4,
                        kernel_size=3,
                        padding=1),
        SeperableConv2d(in_channels=base_net.base_channel * 8,
                        out_channels=2 * 4,
                        kernel_size=3,
                        padding=1),
        SeperableConv2d(in_channels=base_net.base_channel * 16,
                        out_channels=2 * 4,
                        kernel_size=3,
                        padding=1),
        Conv2D(in_channels=base_net.base_channel * 16,
               out_channels=3 * 4,
               kernel_size=3,
               padding=1)
    ])

    classification_headers = LayerList([
        SeperableConv2d(in_channels=base_net.base_channel * 4,
                        out_channels=3 * num_classes,
                        kernel_size=3,
                        padding=1),
        SeperableConv2d(in_channels=base_net.base_channel * 8,
                        out_channels=2 * num_classes,
                        kernel_size=3,
                        padding=1),
        SeperableConv2d(in_channels=base_net.base_channel * 16,
                        out_channels=2 * num_classes,
                        kernel_size=3,
                        padding=1),
        Conv2D(in_channels=base_net.base_channel * 16,
               out_channels=3 * num_classes,
               kernel_size=3,
               padding=1)
    ])

    return SSD(num_classes,
               base_net_model,
               source_layer_indexes,
               extras,
               classification_headers,
               regression_headers,
               is_test=is_test,
               config=config,
               device=device)
Example 28
    def __init__(self, num_channels, num_nodes):
        super(GCNModule, self).__init__()
        self.conv1 = Conv1D(num_nodes, num_nodes, kernel_size=1)
        self.relu = ReLU()
        self.conv2 = Conv1D(num_channels, num_channels, kernel_size=1)
Example 29
    def __init__(self):
        super(ModelCase4, self).__init__()
        self.bn1 = BatchNorm2D(3)
        self.ln1 = LayerNorm([3 * 16 * 16])
        self.relu1 = ReLU()
        self.fc1 = paddle.nn.Linear(3 * 16 * 16, 3 * 16 * 16)