Example #1
    def __init__(self, num_channels, num_filters, reduction_ratio, name=None):
        super(SELayer, self).__init__()

        self.pool2d_gap = nn.AdaptiveAvgPool2D(1)

        self._num_channels = num_channels

        med_ch = int(num_channels / reduction_ratio)
        stdv = 1.0 / math.sqrt(num_channels * 1.0)
        self.squeeze = nn.Linear(
            num_channels,
            med_ch,
            weight_attr=ParamAttr(initializer=Uniform(-stdv, stdv),
                                  name=name + "_sqz_weights"),
            bias_attr=ParamAttr(name=name + '_sqz_offset'))

        stdv = 1.0 / math.sqrt(med_ch * 1.0)
        self.excitation = nn.Linear(
            med_ch,
            num_filters,
            weight_attr=ParamAttr(initializer=Uniform(-stdv, stdv),
                                  name=name + "_exc_weights"),
            bias_attr=ParamAttr(name=name + '_exc_offset'))
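
The snippet shows only the constructor. For context, the matching squeeze-and-excitation forward pass usually looks like the sketch below (reconstructed, not part of the original source; assumes import paddle and import paddle.nn.functional as F):

    def forward(self, input):
        # squeeze: global average pooling down to an (N, C) vector
        pool = self.pool2d_gap(input)
        pool = paddle.squeeze(pool, axis=[2, 3])
        # bottleneck FC pair: reduce by reduction_ratio, then restore
        squeeze = F.relu(self.squeeze(pool))
        excitation = F.sigmoid(self.excitation(squeeze))
        # excite: rescale every channel of the input
        excitation = paddle.unsqueeze(excitation, axis=[2, 3])
        return input * excitation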
Example #2
    def __init__(self,
                 num_channels,
                 filter_size,
                 num_filters,
                 stride,
                 num_groups=1):
        super().__init__()

        self.conv = Conv2D(
            in_channels=num_channels,
            out_channels=num_filters,
            kernel_size=filter_size,
            stride=stride,
            padding=(filter_size - 1) // 2,
            groups=num_groups,
            weight_attr=ParamAttr(initializer=KaimingNormal()),
            bias_attr=False)

        self.bn = BatchNorm(
            num_filters,
            param_attr=ParamAttr(regularizer=L2Decay(0.0)),
            bias_attr=ParamAttr(regularizer=L2Decay(0.0)))
        self.hardswish = nn.Hardswish()
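
The forward pass here is presumably just the conv, batch-norm, hard-swish chain; a minimal sketch (assumed, not shown in the source):

    def forward(self, x):
        x = self.conv(x)
        x = self.bn(x)
        x = self.hardswish(x)
        return x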
Example #3
    def __init__(self,
                 in_channels,
                 channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 bias=True,
                 radix=2,
                 reduction_factor=4,
                 rectify_avg=False,
                 name=None):
        super(SplatConv, self).__init__()

        self.radix = radix

        self.conv1 = ConvBNLayer(num_channels=in_channels,
                                 num_filters=channels * radix,
                                 filter_size=kernel_size,
                                 stride=stride,
                                 groups=groups * radix,
                                 act="relu",
                                 name=name + "_1_weights")

        self.avg_pool2d = AdaptiveAvgPool2D(1)

        inter_channels = int(max(in_channels * radix // reduction_factor, 32))

        # 1x1 conv applied to the pooled (gap) feature
        self.conv2 = ConvBNLayer(num_channels=channels,
                                 num_filters=inter_channels,
                                 filter_size=1,
                                 stride=1,
                                 groups=groups,
                                 act="relu",
                                 name=name + "_2_weights")

        # 1x1 conv producing the split-attention logits
        self.conv3 = Conv2D(in_channels=inter_channels,
                            out_channels=channels * radix,
                            kernel_size=1,
                            stride=1,
                            padding=0,
                            groups=groups,
                            weight_attr=ParamAttr(name=name + "_weights",
                                                  initializer=KaimingNormal()),
                            bias_attr=False)

        self.rsoftmax = rSoftmax(radix=radix, cardinality=groups)
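
A hedged reconstruction of the split-attention forward pass this module implements (following the usual ResNeSt pattern; the control flow is an assumption, while paddle.split, paddle.add_n, and paddle.reshape are standard Paddle APIs):

    def forward(self, x):
        x = self.conv1(x)
        if self.radix > 1:
            # split the radix groups and sum them before the gap branch
            splited = paddle.split(x, num_or_sections=self.radix, axis=1)
            gap = paddle.add_n(splited)
        else:
            gap = x
        gap = self.avg_pool2d(gap)
        gap = self.conv2(gap)
        atten = self.conv3(gap)
        atten = self.rsoftmax(atten)
        atten = paddle.reshape(atten, [-1, atten.shape[1], 1, 1])
        if self.radix > 1:
            # weight each split by its attention map, then sum
            attens = paddle.split(atten, num_or_sections=self.radix, axis=1)
            out = paddle.add_n([att * sp for att, sp in zip(attens, splited)])
        else:
            out = atten * x
        return out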
Example #4
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 groups=1,
                 is_tweaks_mode=False,
                 act=None,
                 name=None):
        super(ConvBNLayer, self).__init__()
        self.is_tweaks_mode = is_tweaks_mode
        # ResNet-D tweak 1/2: add a 2x2 average-pooling layer with stride 2 before
        # the convolution and change the convolution's stride to 1; this works well in practice.
        self._pool2d_avg = AvgPool2D(kernel_size=2,
                                     stride=2,
                                     padding=0,
                                     ceil_mode=True)

        self._conv = Conv2D(in_channels=in_channels,
                            out_channels=out_channels,
                            kernel_size=kernel_size,
                            stride=stride,
                            padding=(kernel_size - 1) // 2,
                            groups=groups,
                            weight_attr=ParamAttr(name=name + "_weights"),
                            bias_attr=False)
        if name == "conv1":
            bn_name = "bn_" + name
        else:
            bn_name = "bn" + name[3:]

        self._act = act

        self._batch_norm = BatchNorm2D(
            out_channels,
            weight_attr=ParamAttr(name=bn_name + "_scale"),
            bias_attr=ParamAttr(bn_name + "_offset"))
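
The is_tweaks_mode flag gates the ResNet-D pooling described in the comment above; a minimal sketch of the matching forward pass (assumed, not shown in the snippet; F is paddle.nn.functional):

    def forward(self, inputs):
        if self.is_tweaks_mode:
            # ResNet-D: 2x2 stride-2 average pooling before the stride-1 conv
            inputs = self._pool2d_avg(inputs)
        y = self._conv(inputs)
        y = self._batch_norm(y)
        if self._act:
            y = getattr(F, self._act)(y)
        return y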
Example #5
    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size,
        stride=1,
        groups=1,
        is_vd_mode=False,
        act=None,
        name=None,
    ):
        super(ConvBNLayer, self).__init__()

        self.is_vd_mode = is_vd_mode
        self._pool2d_avg = nn.AvgPool2D(kernel_size=2,
                                        stride=2,
                                        padding=0,
                                        ceil_mode=True)
        self._conv = nn.Conv2D(in_channels=in_channels,
                               out_channels=out_channels,
                               kernel_size=kernel_size,
                               stride=stride,
                               padding=(kernel_size - 1) // 2,
                               groups=groups,
                               weight_attr=ParamAttr(name=name + "_weights"),
                               bias_attr=False)
        if name == "conv1":
            bn_name = "bn_" + name
        else:
            bn_name = "bn" + name[3:]
        self._batch_norm = nn.BatchNorm(
            out_channels,
            act=act,
            param_attr=ParamAttr(name=bn_name + '_scale'),
            bias_attr=ParamAttr(bn_name + '_offset'),
            moving_mean_name=bn_name + '_mean',
            moving_variance_name=bn_name + '_variance')
Example #6
    def __init__(self,
                 in_channels,
                 out_channels,
                 groups,
                 pool_size=2,
                 pool_stride=2,
                 pool_padding=0,
                 name=None):
        super(ConvBlock, self).__init__()

        self.groups = groups
        self.conv0 = nn.Conv2D(in_channels=in_channels,
                               out_channels=out_channels,
                               kernel_size=3,
                               stride=1,
                               padding=1,
                               weight_attr=ParamAttr(name=name + "1_weights"),
                               bias_attr=ParamAttr(name=name + "1_bias"))
        self.conv_out_list = []
        for i in range(1, groups):
            conv_out = self.add_sublayer(
                'conv{}'.format(i),
                Conv2D(in_channels=out_channels,
                       out_channels=out_channels,
                       kernel_size=3,
                       stride=1,
                       padding=1,
                       weight_attr=ParamAttr(name=name +
                                             "{}_weights".format(i + 1)),
                       bias_attr=ParamAttr(name=name +
                                           "{}_bias".format(i + 1))))
            self.conv_out_list.append(conv_out)

        self.pool = MaxPool2D(kernel_size=pool_size,
                              stride=pool_stride,
                              padding=pool_padding,
                              ceil_mode=True)
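
A plausible forward pass for this VGG-style block, with a ReLU after every conv and pooling at the end (reconstructed; F is paddle.nn.functional):

    def forward(self, inputs):
        x = self.conv0(inputs)
        x = F.relu(x)
        for conv in self.conv_out_list:
            x = conv(x)
            x = F.relu(x)
        x = self.pool(x)
        return x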
Example #7
    def __init__(
        self,
        num_channels: int,
        num_filters: int,
        filter_size: int,
        stride: int = 1,
        groups: int = 1,
        is_vd_mode: bool = False,
        act: str = None,
        name: str = None,
    ):
        super(ConvBNLayer, self).__init__()

        self.is_vd_mode = is_vd_mode
        self._pool2d_avg = AvgPool2d(kernel_size=2,
                                     stride=2,
                                     padding=0,
                                     ceil_mode=True)
        self._conv = Conv2d(in_channels=num_channels,
                            out_channels=num_filters,
                            kernel_size=filter_size,
                            stride=stride,
                            padding=(filter_size - 1) // 2,
                            groups=groups,
                            weight_attr=ParamAttr(name=name + "_weights"),
                            bias_attr=False)
        if name == "conv1":
            bn_name = "bn_" + name
        else:
            bn_name = "bn" + name[3:]
        self._batch_norm = BatchNorm(
            num_filters,
            act=act,
            param_attr=ParamAttr(name=bn_name + '_scale'),
            bias_attr=ParamAttr(bn_name + '_offset'),
            moving_mean_name=bn_name + '_mean',
            moving_variance_name=bn_name + '_variance')
Example #8
    def __init__(self,
                 anchors=[[10, 13], [16, 30], [33, 23], [30, 61], [62, 45],
                          [59, 119], [116, 90], [156, 198], [373, 326]],
                 anchor_masks=[[6, 7, 8], [3, 4, 5], [0, 1, 2]],
                 num_classes=80,
                 loss='YOLOv3Loss',
                 iou_aware=False,
                 iou_aware_factor=0.4):
        super(YOLOv3Head, self).__init__()
        self.num_classes = num_classes
        self.loss = loss

        self.iou_aware = iou_aware
        self.iou_aware_factor = iou_aware_factor

        self.parse_anchor(anchors, anchor_masks)
        self.num_outputs = len(self.anchors)

        self.yolo_outputs = []
        for i in range(len(self.anchors)):
            if self.iou_aware:
                num_filters = self.num_outputs * (self.num_classes + 6)
            else:
                num_filters = self.num_outputs * (self.num_classes + 5)
            name = 'yolo_output.{}'.format(i)
            yolo_output = self.add_sublayer(
                name,
                nn.Conv2D(
                    in_channels=1024 // (2**i),
                    out_channels=num_filters,
                    kernel_size=1,
                    stride=1,
                    padding=0,
                    weight_attr=ParamAttr(name=name + '.conv.weights'),
                    bias_attr=ParamAttr(
                        name=name + '.conv.bias', regularizer=L2Decay(0.))))
            self.yolo_outputs.append(yolo_output)
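
The per-level channel count follows directly from the anchor layout: each of the num_outputs anchors on a scale predicts 4 box offsets, 1 objectness score, and num_classes class logits, plus one extra channel per anchor when iou_aware is on. A quick check of the arithmetic used above:

    # 3 anchors per scale, (num_classes + 5) values per anchor
    num_outputs, num_classes = 3, 80
    assert num_outputs * (num_classes + 5) == 255  # plain YOLOv3 head
    assert num_outputs * (num_classes + 6) == 258  # with the iou-aware channel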
Example #9
 def __init__(self, in_dim=256, mlp_dim=1024, resolution=7, roi_stages=1):
     super(TwoFCHead, self).__init__()
     self.in_dim = in_dim
     self.mlp_dim = mlp_dim
     self.roi_stages = roi_stages
     fan = in_dim * resolution * resolution
     self.fc6_list = []
     self.fc6_relu_list = []
     self.fc7_list = []
     self.fc7_relu_list = []
     for stage in range(roi_stages):
         fc6_name = 'fc6_{}'.format(stage)
         fc7_name = 'fc7_{}'.format(stage)
         lr_factor = 2**stage
         fc6 = self.add_sublayer(
             fc6_name,
             nn.Linear(in_dim * resolution * resolution,
                       mlp_dim,
                       weight_attr=ParamAttr(
                           learning_rate=lr_factor,
                           initializer=XavierUniform(fan_out=fan)),
                       bias_attr=ParamAttr(learning_rate=2. * lr_factor,
                                           regularizer=L2Decay(0.))))
         fc6_relu = self.add_sublayer(fc6_name + 'act', ReLU())
         fc7 = self.add_sublayer(
             fc7_name,
             nn.Linear(mlp_dim,
                       mlp_dim,
                       weight_attr=ParamAttr(learning_rate=lr_factor,
                                             initializer=XavierUniform()),
                       bias_attr=ParamAttr(learning_rate=2. * lr_factor,
                                           regularizer=L2Decay(0.))))
         fc7_relu = self.add_sublayer(fc7_name + 'act', ReLU())
         self.fc6_list.append(fc6)
         self.fc6_relu_list.append(fc6_relu)
         self.fc7_list.append(fc7)
         self.fc7_relu_list.append(fc7_relu)
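
A hedged sketch of the per-stage forward pass these lists support (reconstructed, assuming import paddle):

    def forward(self, rois_feat, stage=0):
        # flatten the pooled RoI feature, then run the fc6/relu/fc7/relu chain
        rois_feat = paddle.flatten(rois_feat, start_axis=1, stop_axis=-1)
        fc6 = self.fc6_list[stage](rois_feat)
        fc6 = self.fc6_relu_list[stage](fc6)
        fc7 = self.fc7_list[stage](fc6)
        fc7 = self.fc7_relu_list[stage](fc7)
        return fc7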
Example #10
 def __init__(self,
              input_channels,
              output_channels,
              filter_size,
              stride=1,
              groups=1,
              act=None,
              name=None):
     super(ConvBNLayer, self).__init__()
     if "downsample" in name:
         conv_name = name + ".0"
     else:
         conv_name = name
     self._conv = Conv2D(
         in_channels=input_channels,
         out_channels=output_channels,
         kernel_size=filter_size,
         stride=stride,
         padding=(filter_size - 1) // 2,
         groups=groups,
         weight_attr=ParamAttr(name=conv_name + ".weight"),
         bias_attr=False)
     if "downsample" in name:
         bn_name = name[:9] + "downsample.1"
     else:
         if "conv1" == name:
             bn_name = "bn" + name[-1]
         else:
             bn_name = (name[:10] if name[7:9].isdigit() else name[:9]
                        ) + "bn" + name[-1]
     self._bn = BatchNorm(
         num_channels=output_channels,
         act=act,
         param_attr=ParamAttr(name=bn_name + ".weight"),
         bias_attr=ParamAttr(name=bn_name + ".bias"),
         moving_mean_name=bn_name + ".running_mean",
         moving_variance_name=bn_name + ".running_var")
Example #11
    def __init__(self,
                 ch_in,
                 ch_out,
                 filter_size=3,
                 stride=1,
                 groups=1,
                 padding=0,
                 act=None):
        super(ConvBNLayer, self).__init__()

        self.conv = nn.Conv2D(in_channels=ch_in,
                              out_channels=ch_out,
                              kernel_size=filter_size,
                              stride=stride,
                              padding=padding,
                              groups=groups,
                              bias_attr=False)

        self.bn = nn.BatchNorm2D(
            ch_out,
            weight_attr=ParamAttr(regularizer=L2Decay(0.0)),
            bias_attr=ParamAttr(regularizer=L2Decay(0.0)))
        self.act = get_act_fn(act) if act is None or isinstance(
            act, (str, dict)) else act
Example #12
 def __init__(self, ch_in, ch_out=64, conv_num=2):
     super(WHHead, self).__init__()
     head_conv = nn.Sequential()
     for i in range(conv_num):
         name = 'conv.{}'.format(i)
         head_conv.add_sublayer(
             name,
             nn.Conv2D(in_channels=ch_in if i == 0 else ch_out,
                       out_channels=ch_out,
                       kernel_size=3,
                       padding=1,
                       weight_attr=ParamAttr(initializer=Normal(0, 0.001)),
                       bias_attr=ParamAttr(learning_rate=2.,
                                           regularizer=L2Decay(0.))))
         head_conv.add_sublayer(name + '.act', nn.ReLU())
     self.feat = self.add_sublayer('wh_feat', head_conv)
     self.head = self.add_sublayer(
         'wh_head',
         nn.Conv2D(in_channels=ch_out,
                   out_channels=4,
                   kernel_size=1,
                   weight_attr=ParamAttr(initializer=Normal(0, 0.001)),
                   bias_attr=ParamAttr(learning_rate=2.,
                                       regularizer=L2Decay(0.))))
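
The forward pass is presumably the feature stack followed by the 1x1 prediction conv; a short sketch (an assumption, not shown in the source):

    def forward(self, feat):
        out = self.feat(feat)
        out = self.head(out)
        return out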
Example #13
 def __init__(self, channel, lr_mult, conv_decay, reduction=4, name=""):
     super(SEModule, self).__init__()
     self.avg_pool = nn.AdaptiveAvgPool2D(1)
     mid_channels = int(channel // reduction)
     self.conv1 = nn.Conv2D(
         in_channels=channel,
         out_channels=mid_channels,
         kernel_size=1,
         stride=1,
         padding=0,
         weight_attr=ParamAttr(learning_rate=lr_mult,
                               regularizer=L2Decay(conv_decay)),
         bias_attr=ParamAttr(learning_rate=lr_mult,
                             regularizer=L2Decay(conv_decay)))
     self.conv2 = nn.Conv2D(
         in_channels=mid_channels,
         out_channels=channel,
         kernel_size=1,
         stride=1,
         padding=0,
         weight_attr=ParamAttr(learning_rate=lr_mult,
                               regularizer=L2Decay(conv_decay)),
         bias_attr=ParamAttr(learning_rate=lr_mult,
                             regularizer=L2Decay(conv_decay)))
Example #14
File: modeling.py  Project: saxon-zh/hapi
    def __init__(self,
                 ch_in,
                 ch_out,
                 filter_size=3,
                 stride=1,
                 groups=1,
                 padding=0,
                 act="leaky"):
        super(ConvBNLayer, self).__init__()

        self.conv = nn.Conv2D(
            in_channels=ch_in,
            out_channels=ch_out,
            kernel_size=filter_size,
            stride=stride,
            padding=padding,
            groups=groups,
            bias_attr=False)
        self.batch_norm = nn.BatchNorm2D(
            ch_out,
            weight_attr=ParamAttr(regularizer=L2Decay(0.)),
            bias_attr=ParamAttr(regularizer=L2Decay(0.)))

        self.act = act
Example #15
    def __init__(self,
                 in_channels,
                 out_channels,
                 ksize,
                 stride,
                 groups=1,
                 bias=False,
                 act="silu"):
        super(BaseConv, self).__init__()
        self.conv = nn.Conv2D(
            in_channels,
            out_channels,
            kernel_size=ksize,
            stride=stride,
            padding=(ksize - 1) // 2,
            groups=groups,
            bias_attr=bias)
        self.bn = nn.BatchNorm2D(
            out_channels,
            weight_attr=ParamAttr(regularizer=L2Decay(0.0)),
            bias_attr=ParamAttr(regularizer=L2Decay(0.0)))
        self.act = get_activation(act)

        self._init_weights()
Example #16
 def __init__(self,
              in_channels,
              out_channels,
              kernel_size,
              stride=1,
              groups=1,
              act=None,
              name=None):
     super(ConvBNLayer, self).__init__()
     self.conv = nn.Conv2D(in_channels=in_channels,
                           out_channels=out_channels,
                           kernel_size=kernel_size,
                           stride=stride,
                           padding=(kernel_size - 1) // 2,
                           groups=groups,
                           weight_attr=ParamAttr(name=name + "_weights"),
                           bias_attr=False)
     bn_name = "bn_" + name
     self.bn = nn.BatchNorm(out_channels,
                            act=act,
                            param_attr=ParamAttr(name=bn_name + '_scale'),
                            bias_attr=ParamAttr(bn_name + '_offset'),
                            moving_mean_name=bn_name + '_mean',
                            moving_variance_name=bn_name + '_variance')
Example #17
    def __init__(self,
                 input_channels,
                 output_channels,
                 filter_size,
                 stride,
                 padding,
                 name=None):
        super(ConvBNLayer, self).__init__()

        self._conv = Conv2D(in_channels=input_channels,
                            out_channels=output_channels,
                            kernel_size=filter_size,
                            stride=stride,
                            padding=padding,
                            weight_attr=ParamAttr(name=name + ".conv.weights"),
                            bias_attr=False)

        bn_name = name + ".bn"
        self._bn = BatchNorm(num_channels=output_channels,
                             act="relu",
                             param_attr=ParamAttr(name=bn_name + ".scale"),
                             bias_attr=ParamAttr(name=bn_name + ".offset"),
                             moving_mean_name=bn_name + ".mean",
                             moving_variance_name=bn_name + ".var")
Example #18
 def __init__(self,
              in_features,
              hidden_features=None,
              out_features=None,
              act_layer=nn.GELU,
              drop=0.):
     super().__init__()
     out_features = out_features or in_features
     hidden_features = hidden_features or in_features
     trunc_norm = ParamAttr(initializer=nn.initializer.TruncatedNormal(
         std=0.02))
     self.fc1 = nn.Linear(in_features, hidden_features, trunc_norm)
     self.act = act_layer()
     self.fc2 = nn.Linear(hidden_features, out_features, trunc_norm)
     self.drop = nn.Dropout(drop)
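
This is the standard transformer MLP block; its forward pass (not included in the snippet) is conventionally:

    def forward(self, x):
        x = self.fc1(x)
        x = self.act(x)
        x = self.drop(x)
        x = self.fc2(x)
        x = self.drop(x)
        return x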
Example #19
 def __init__(self,
              layer_num,
              ch_in,
              ch_out,
              norm_type='bn',
              lite_neck=False,
              name=None):
     super(ShortCut, self).__init__()
     shortcut_conv = nn.Sequential()
     for i in range(layer_num):
         fan_out = 3 * 3 * ch_out
         std = math.sqrt(2. / fan_out)
         in_channels = ch_in if i == 0 else ch_out
         shortcut_name = name + '.conv.{}'.format(i)
         if lite_neck:
             shortcut_conv.add_sublayer(
                 shortcut_name,
                 LiteConv(in_channels=in_channels,
                          out_channels=ch_out,
                          with_act=i < layer_num - 1,
                          norm_type=norm_type))
         else:
             shortcut_conv.add_sublayer(
                 shortcut_name,
                 nn.Conv2D(
                     in_channels=in_channels,
                     out_channels=ch_out,
                     kernel_size=3,
                     padding=1,
                     weight_attr=ParamAttr(initializer=Normal(0, std)),
                     bias_attr=ParamAttr(learning_rate=2.,
                                         regularizer=L2Decay(0.))))
             if i < layer_num - 1:
                 shortcut_conv.add_sublayer(shortcut_name + '.act',
                                            nn.ReLU())
     self.shortcut = self.add_sublayer('shortcut', shortcut_conv)
Example #20
    def __init__(self,
                 in_c,
                 out_c,
                 filter_size,
                 stride,
                 padding,
                 num_groups=1,
                 if_act=True,
                 act=None):
        super().__init__()

        self.conv = Conv2D(in_channels=in_c,
                           out_channels=out_c,
                           kernel_size=filter_size,
                           stride=stride,
                           padding=padding,
                           groups=num_groups,
                           bias_attr=False)
        self.bn = BatchNorm(num_channels=out_c,
                            act=None,
                            param_attr=ParamAttr(regularizer=L2Decay(0.0)),
                            bias_attr=ParamAttr(regularizer=L2Decay(0.0)))
        self.if_act = if_act
        self.act = _create_act(act)
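
The if_act flag gates the activation at the end of the block; a minimal sketch of the implied forward pass:

    def forward(self, x):
        x = self.conv(x)
        x = self.bn(x)
        if self.if_act:
            x = self.act(x)
        return x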
Example #21
 def __init__(self, in_channels, name_list):
     super(Head, self).__init__()
     self.conv1 = nn.Conv2D(in_channels=in_channels,
                            out_channels=in_channels // 4,
                            kernel_size=3,
                            padding=1,
                            weight_attr=ParamAttr(name=name_list[0] +
                                                  '.w_0'),
                            bias_attr=False)
     self.conv_bn1 = nn.BatchNorm(
         num_channels=in_channels // 4,
         param_attr=ParamAttr(
             name=name_list[1] + '.w_0',
             initializer=paddle.nn.initializer.Constant(value=1.0)),
         bias_attr=ParamAttr(
             name=name_list[1] + '.b_0',
             initializer=paddle.nn.initializer.Constant(value=1e-4)),
         moving_mean_name=name_list[1] + '.w_1',
         moving_variance_name=name_list[1] + '.w_2',
         act='relu')
     self.conv2 = nn.Conv2DTranspose(
         in_channels=in_channels // 4,
         out_channels=in_channels // 4,
         kernel_size=2,
         stride=2,
         weight_attr=ParamAttr(
             name=name_list[2] + '.w_0',
             initializer=paddle.nn.initializer.KaimingUniform()),
         bias_attr=get_bias_attr(in_channels // 4, name_list[-1] + "conv2"))
     self.conv_bn2 = nn.BatchNorm(
         num_channels=in_channels // 4,
         param_attr=ParamAttr(
             name=name_list[3] + '.w_0',
             initializer=paddle.nn.initializer.Constant(value=1.0)),
         bias_attr=ParamAttr(
             name=name_list[3] + '.b_0',
             initializer=paddle.nn.initializer.Constant(value=1e-4)),
         moving_mean_name=name_list[3] + '.w_1',
         moving_variance_name=name_list[3] + '.w_2',
         act="relu")
     self.conv3 = nn.Conv2DTranspose(
         in_channels=in_channels // 4,
         out_channels=1,
         kernel_size=2,
         stride=2,
         weight_attr=ParamAttr(
             name=name_list[4] + '.w_0',
             initializer=paddle.nn.initializer.KaimingUniform()),
         bias_attr=get_bias_attr(in_channels // 4, name_list[-1] + "conv3"),
     )
Example #22
    def __init__(self, ch_ins, ch_out, up_strides, dcn_v2=True):
        super(IDAUp, self).__init__()
        for i in range(1, len(ch_ins)):
            ch_in = ch_ins[i]
            up_s = int(up_strides[i])
            fan_in = ch_in * 3 * 3
            stdv = 1. / math.sqrt(fan_in)
            proj = nn.Sequential(
                ConvNormLayer(ch_in,
                              ch_out,
                              filter_size=3,
                              stride=1,
                              use_dcn=dcn_v2,
                              bias_on=dcn_v2,
                              norm_decay=None,
                              dcn_lr_scale=1.,
                              dcn_regularizer=None,
                              initializer=Uniform(-stdv, stdv)), nn.ReLU())
            node = nn.Sequential(
                ConvNormLayer(ch_out,
                              ch_out,
                              filter_size=3,
                              stride=1,
                              use_dcn=dcn_v2,
                              bias_on=dcn_v2,
                              norm_decay=None,
                              dcn_lr_scale=1.,
                              dcn_regularizer=None,
                              initializer=Uniform(-stdv, stdv)), nn.ReLU())

            kernel_size = up_s * 2
            fan_in = ch_out * kernel_size * kernel_size
            stdv = 1. / math.sqrt(fan_in)
            up = nn.Conv2DTranspose(
                ch_out,
                ch_out,
                kernel_size=up_s * 2,
                stride=up_s,
                padding=up_s // 2,
                groups=ch_out,
                weight_attr=ParamAttr(initializer=Uniform(-stdv, stdv)),
                bias_attr=False)
            fill_up_weights(up)
            setattr(self, 'proj_' + str(i), proj)
            setattr(self, 'up_' + str(i), up)
            setattr(self, 'node_' + str(i), node)
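
The fill_up_weights call above initializes the grouped transposed convolution with a fixed bilinear-interpolation kernel, as is conventional in DLA/CenterNet-style upsampling. A hedged sketch of such a helper (an assumption, not shown in the source; relies on numpy-backed weights plus import math and import paddle):

    def fill_up_weights(up):
        # build one bilinear kernel, then copy it to every group
        weight = up.weight.numpy()
        f = math.ceil(weight.shape[2] / 2)
        c = (2 * f - 1 - f % 2) / (2. * f)
        for i in range(weight.shape[2]):
            for j in range(weight.shape[3]):
                weight[0, 0, i, j] = \
                    (1 - math.fabs(i / f - c)) * (1 - math.fabs(j / f - c))
        for g in range(1, weight.shape[0]):
            weight[g, 0, :, :] = weight[0, 0, :, :]
        up.weight.set_value(paddle.to_tensor(weight))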
Example #23
    def __init__(self,
                 num_channels: int,
                 num_filters: int,
                 filter_size: int,
                 stride: int = 1,
                 groups: int = 1,
                 name: str = None):
        super(ConvLayer, self).__init__()

        self._conv = Conv2d(in_channels=num_channels,
                            out_channels=num_filters,
                            kernel_size=filter_size,
                            stride=stride,
                            padding=(filter_size - 1) // 2,
                            groups=groups,
                            weight_attr=ParamAttr(name=name + "_weights"),
                            bias_attr=False)
Example #24
    def __init__(self, class_dim: int = 1000, load_checkpoint: str = None):
        super(MobileNet, self).__init__()

        self.class_dim = class_dim

        bottleneck_params_list = [(1, 16, 1, 1), (6, 24, 2, 2), (6, 32, 3, 2), (6, 64, 4, 2), (6, 96, 3, 1),
                                  (6, 160, 3, 2), (6, 320, 1, 1)]

        self.conv1 = ConvBNLayer(
            num_channels=3, num_filters=int(32), filter_size=3, stride=2, padding=1, name="conv1_1")

        self.block_list = []
        i = 1
        in_c = int(32)
        for layer_setting in bottleneck_params_list:
            t, c, n, s = layer_setting
            i += 1
            block = self.add_sublayer(
                "conv" + str(i), sublayer=InversiBlocks(in_c=in_c, t=t, c=int(c), n=n, s=s, name="conv" + str(i)))
            self.block_list.append(block)
            in_c = int(c)

        self.out_c = 1280
        self.conv9 = ConvBNLayer(
            num_channels=in_c, num_filters=self.out_c, filter_size=1, stride=1, padding=0, name="conv9")

        self.pool2d_avg = AdaptiveAvgPool2d(1)

        self.out = Linear(
            self.out_c, class_dim, weight_attr=ParamAttr(name="fc10_weights"), bias_attr=ParamAttr(name="fc10_offset"))

        if load_checkpoint is not None:
            model_dict = paddle.load(load_checkpoint)[0]
            self.set_dict(model_dict)
            print("load custom checkpoint success")

        else:
            checkpoint = os.path.join(self.directory, 'mobilenet_v2_imagenet.pdparams')
            if not os.path.exists(checkpoint):
                os.system(
                    'wget https://paddlehub.bj.bcebos.com/dygraph/image_classification/mobilenet_v2_imagenet.pdparams -O '
                    + checkpoint)
            model_dict = paddle.load(checkpoint)[0]
            self.set_dict(model_dict)
            print("load pretrained checkpoint success")
Example #25
    def __call__(self):
        if not hasattr(self, '_anchor_vars'):
            anchor_vars = []
            helper = LayerHelper('anchor_grid')
            for idx, l in enumerate(range(self.min_level, self.max_level + 1)):
                stride = 2**l
                anchors = self.make_grid(stride)
                var = helper.create_parameter(
                    attr=ParamAttr(name='anchors_{}'.format(idx)),
                    shape=anchors.shape,
                    dtype='float32',
                    stop_gradient=True,
                    default_initializer=NumpyArrayInitializer(anchors))
                anchor_vars.append(var)
                var.persistable = True
            self._anchor_vars = anchor_vars

        return self._anchor_vars
Example #26
    def __init__(self,
                 num_channels,
                 filter_size,
                 num_filters,
                 stride,
                 padding,
                 num_groups=1):
        super().__init__()

        self.conv = Conv2D(in_channels=num_channels,
                           out_channels=num_filters,
                           kernel_size=filter_size,
                           stride=stride,
                           padding=padding,
                           groups=num_groups,
                           weight_attr=ParamAttr(initializer=KaimingNormal()),
                           bias_attr=False)
        self.bn = BatchNorm(num_filters)
        self.relu = ReLU()
Example #27
    def __init__(self, scale=1.0, class_num=1000, return_patterns=None):
        super().__init__()
        self.scale = scale

        self.conv = ConvBNLayer(num_channels=3,
                                filter_size=3,
                                num_filters=int(32 * scale),
                                stride=2,
                                padding=1)

        # columns: num_channels, num_filters1, num_filters2, num_groups, stride
        self.cfg = [[int(32 * scale), 32, 64, 32, 1],
                    [int(64 * scale), 64, 128, 64, 2],
                    [int(128 * scale), 128, 128, 128, 1],
                    [int(128 * scale), 128, 256, 128, 2],
                    [int(256 * scale), 256, 256, 256, 1],
                    [int(256 * scale), 256, 512, 256, 2],
                    [int(512 * scale), 512, 512, 512, 1],
                    [int(512 * scale), 512, 512, 512, 1],
                    [int(512 * scale), 512, 512, 512, 1],
                    [int(512 * scale), 512, 512, 512, 1],
                    [int(512 * scale), 512, 512, 512, 1],
                    [int(512 * scale), 512, 1024, 512, 2],
                    [int(1024 * scale), 1024, 1024, 1024, 1]]

        self.blocks = nn.Sequential(*[
            DepthwiseSeparable(num_channels=params[0],
                               num_filters1=params[1],
                               num_filters2=params[2],
                               num_groups=params[3],
                               stride=params[4],
                               scale=scale) for params in self.cfg
        ])

        self.avg_pool = AdaptiveAvgPool2D(1)
        self.flatten = Flatten(start_axis=1, stop_axis=-1)

        self.fc = Linear(int(1024 * scale),
                         class_num,
                         weight_attr=ParamAttr(initializer=KaimingNormal()))
        if return_patterns is not None:
            self.update_res(return_patterns)
            self.register_forward_post_hook(self._return_dict_hook)
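
The backbone's forward pass is a straight pipeline through the layers registered above; a sketch matching the constructor (assumed, not part of the snippet):

    def forward(self, x):
        x = self.conv(x)
        x = self.blocks(x)
        x = self.avg_pool(x)
        x = self.flatten(x)
        x = self.fc(x)
        return x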
Example #28
    def __init__(self, version, class_num=1000):
        super(SqueezeNet, self).__init__()
        self.version = version

        if self.version == "1.0":
            self._conv = Conv2D(3,
                                96,
                                7,
                                stride=2,
                                weight_attr=ParamAttr(name="conv1_weights"),
                                bias_attr=ParamAttr(name="conv1_offset"))
            self._pool = MaxPool2D(kernel_size=3, stride=2, padding=0)
            self._conv1 = MakeFire(96, 16, 64, 64, name="fire2")
            self._conv2 = MakeFire(128, 16, 64, 64, name="fire3")
            self._conv3 = MakeFire(128, 32, 128, 128, name="fire4")

            self._conv4 = MakeFire(256, 32, 128, 128, name="fire5")
            self._conv5 = MakeFire(256, 48, 192, 192, name="fire6")
            self._conv6 = MakeFire(384, 48, 192, 192, name="fire7")
            self._conv7 = MakeFire(384, 64, 256, 256, name="fire8")

            self._conv8 = MakeFire(512, 64, 256, 256, name="fire9")
        else:
            self._conv = Conv2D(3,
                                64,
                                3,
                                stride=2,
                                padding=1,
                                weight_attr=ParamAttr(name="conv1_weights"),
                                bias_attr=ParamAttr(name="conv1_offset"))
            self._pool = MaxPool2D(kernel_size=3, stride=2, padding=0)
            self._conv1 = MakeFire(64, 16, 64, 64, name="fire2")
            self._conv2 = MakeFire(128, 16, 64, 64, name="fire3")

            self._conv3 = MakeFire(128, 32, 128, 128, name="fire4")
            self._conv4 = MakeFire(256, 32, 128, 128, name="fire5")

            self._conv5 = MakeFire(256, 48, 192, 192, name="fire6")
            self._conv6 = MakeFire(384, 48, 192, 192, name="fire7")
            self._conv7 = MakeFire(384, 64, 256, 256, name="fire8")
            self._conv8 = MakeFire(512, 64, 256, 256, name="fire9")

        self._drop = Dropout(p=0.5, mode="downscale_in_infer")
        self._conv9 = Conv2D(512,
                             class_num,
                             1,
                             weight_attr=ParamAttr(name="conv10_weights"),
                             bias_attr=ParamAttr(name="conv10_offset"))
        self._avg_pool = AdaptiveAvgPool2D(1)
Example #29
    def __init__(self,
                 input_channels: int,
                 output_channels: int,
                 stride: int = 2,
                 name: str = None,
                 relu_first: bool = False):
        super(EntryFlowBottleneckBlock, self).__init__()
        self.relu_first = relu_first

        self._short = Conv2d(
            in_channels=input_channels,
            out_channels=output_channels,
            kernel_size=1,
            stride=stride,
            padding=0,
            weight_attr=ParamAttr(name + "_branch1_weights"),
            bias_attr=False)
        self._conv1 = SeparableConv(input_channels, output_channels, stride=1, name=name + "_branch2a_weights")
        self._conv2 = SeparableConv(output_channels, output_channels, stride=1, name=name + "_branch2b_weights")
        self._pool = MaxPool2d(kernel_size=3, stride=stride, padding=1)
Example #30
    def __init__(self, input_channels, output_channels1, output_channels2,
                 name):
        super(ExitFlowBottleneckBlock, self).__init__()

        self._short = Conv2d(in_channels=input_channels,
                             out_channels=output_channels2,
                             kernel_size=1,
                             stride=2,
                             padding=0,
                             weight_attr=ParamAttr(name + "_branch1_weights"),
                             bias_attr=False)
        self._conv_1 = SeparableConv(input_channels,
                                     output_channels1,
                                     stride=1,
                                     name=name + "_branch2a_weights")
        self._conv_2 = SeparableConv(output_channels1,
                                     output_channels2,
                                     stride=1,
                                     name=name + "_branch2b_weights")
        self._pool = MaxPool2d(kernel_size=3, stride=2, padding=1)