Example #1
 def __init__(self,
              cin,
              cout,
              kernel_size,
              stride,
              padding,
              output_padding=0,
              *args,
              **kwargs):
     super().__init__(*args, **kwargs)
     self.conv_block = nn.Sequential(
         nn.Conv2DTranspose(cin, cout, kernel_size, stride, padding,
                            output_padding), nn.BatchNorm2D(cout))
     self.act = nn.ReLU()
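Only the constructor is shown above; a minimal forward sketch, assuming this method belongs to a paddle.nn.Layer subclass wired the usual way:

 def forward(self, x):
     # deconv + batch norm, then the ReLU stored separately in __init__
     return self.act(self.conv_block(x))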
Example #2
def make_layers(cfg, batch_norm=False):
    layers = []
    in_channels = 3
    for v in cfg:
        if v == 'M':
            layers += [nn.MaxPool2D(kernel_size=2, stride=2)]
        else:
            conv2d = nn.Conv2D(in_channels, v, kernel_size=3, padding=1)
            if batch_norm:
                layers += [conv2d, nn.BatchNorm2D(v), nn.ReLU()]
            else:
                layers += [conv2d, nn.ReLU()]
            in_channels = v
    return nn.Sequential(*layers)
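A quick usage sketch (the cfg list here is illustrative, not from the original; integers are output channels and 'M' marks a max-pool stage):

# hypothetical VGG-style config
features = make_layers([64, 'M', 128, 'M', 256, 256, 'M'], batch_norm=True)
out = features(paddle.rand([1, 3, 224, 224]))  # -> [1, 256, 28, 28]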
Example #3
    def __init__(self, num_classes, in_channels):
        super().__init__()
        in_channels = in_channels[-1]
        inter_channels = in_channels // 4

        self.channel_conv = layers.ConvBNReLU(in_channels, inter_channels, 3)
        self.position_conv = layers.ConvBNReLU(in_channels, inter_channels, 3)
        self.pam = PAM(inter_channels)
        self.cam = CAM()
        self.conv1 = layers.ConvBNReLU(inter_channels, inter_channels, 3)
        self.conv2 = layers.ConvBNReLU(inter_channels, inter_channels, 3)

        self.aux_head = nn.Sequential(nn.Dropout2D(0.1),
                                      nn.Conv2D(in_channels, num_classes, 1))

        self.aux_head_pam = nn.Sequential(
            nn.Dropout2D(0.1), nn.Conv2D(inter_channels, num_classes, 1))

        self.aux_head_cam = nn.Sequential(
            nn.Dropout2D(0.1), nn.Conv2D(inter_channels, num_classes, 1))

        self.cls_head = nn.Sequential(
            nn.Dropout2D(0.1), nn.Conv2D(inter_channels, num_classes, 1))
Example #4
    def test_apply_init_weight(self):
        with fluid.dygraph.guard():
            net = LeNetDygraph()
            net.eval()

            net_layers = nn.Sequential(*list(net.children()))
            net_layers.eval()

            x = paddle.rand([2, 1, 28, 28])

            y1 = net(x)
            y2 = net_layers(x)

            np.testing.assert_allclose(y1.numpy(), y2.numpy())
Example #5
def conv3x3_block(in_channels, out_channels, stride=1):
    # He (MSRA) initialization: std = sqrt(2 / n) with n = k * k * out_channels
    n = 3 * 3 * out_channels
    w = math.sqrt(2. / n)
    conv_layer = nn.Conv2D(
        in_channels,
        out_channels,
        kernel_size=3,
        stride=stride,
        padding=1,
        weight_attr=nn.initializer.Normal(
            mean=0.0, std=w),
        bias_attr=nn.initializer.Constant(0))
    block = nn.Sequential(conv_layer, nn.BatchNorm2D(out_channels), nn.ReLU())
    return block
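A usage sketch with illustrative shapes:

block = conv3x3_block(in_channels=3, out_channels=64, stride=2)
y = block(paddle.rand([1, 3, 32, 32]))  # -> [1, 64, 16, 16]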
Example #6
 def __init__(self, in_channels, channels, se_ratio=12):
     super(SE, self).__init__()
     self.avg_pool = nn.AdaptiveAvgPool2D(1)
     self.fc = nn.Sequential(
         nn.Conv2D(in_channels,
                   channels // se_ratio,
                   kernel_size=1,
                   padding=0),
         nn.BatchNorm2D(channels // se_ratio),
         nn.ReLU(),
         nn.Conv2D(channels // se_ratio, channels, kernel_size=1,
                   padding=0),
         nn.Sigmoid(),
     )
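The forward pass is not shown; a sketch of the usual squeeze-and-excitation wiring, assuming channels == in_channels so the broadcast works:

 def forward(self, x):
     # squeeze to 1x1, excite through the bottleneck, rescale the input
     w = self.fc(self.avg_pool(x))
     return x * w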
Example #7
 def _make_conv_level(self, ch_in, ch_out, conv_num, stride=1):
     modules = []
     for i in range(conv_num):
         modules.extend([
             ConvNormLayer(
                 ch_in,
                 ch_out,
                 filter_size=3,
                 stride=stride if i == 0 else 1,
                 bias_on=False,
                 norm_decay=None), nn.ReLU()
         ])
         ch_in = ch_out
     return nn.Sequential(*modules)
Example #8
 def __init__(self, depth=34, residual_root=False):
     super(DLA, self).__init__()
     levels, channels = DLA_cfg[depth]
     if depth == 34:
         block = BasicBlock
     else:
         raise NotImplementedError('unsupported DLA depth: {}'.format(depth))
     self.channels = channels
     self.base_layer = nn.Sequential(
         ConvNormLayer(
             3,
             channels[0],
             filter_size=7,
             stride=1,
             bias_on=False,
             norm_decay=None),
         nn.ReLU())
     self.level0 = self._make_conv_level(channels[0], channels[0], levels[0])
     self.level1 = self._make_conv_level(
         channels[0], channels[1], levels[1], stride=2)
     self.level2 = Tree(
         levels[2],
         block,
         channels[1],
         channels[2],
         2,
         level_root=False,
         root_residual=residual_root)
     self.level3 = Tree(
         levels[3],
         block,
         channels[2],
         channels[3],
         2,
         level_root=True,
         root_residual=residual_root)
     self.level4 = Tree(
         levels[4],
         block,
         channels[3],
         channels[4],
         2,
         level_root=True,
         root_residual=residual_root)
     self.level5 = Tree(
         levels[5],
         block,
         channels[4],
         channels[5],
         2,
         level_root=True,
         root_residual=residual_root)
Example #9
    def __init__(self, config, with_efeat=True):
        super(CatGINConv, self).__init__()
        log.info("layer_type is %s" % self.__class__.__name__)
        self.config = config
        emb_dim = self.config.emb_dim

        self.with_efeat = with_efeat

        self.mlp = nn.Sequential(Linear(emb_dim, emb_dim),
                                 batch_norm_1d(emb_dim), nn.Swish(),
                                 Linear(emb_dim, emb_dim))

        self.send_mlp = nn.Sequential(nn.Linear(2 * emb_dim, 2 * emb_dim),
                                      nn.Swish(), Linear(2 * emb_dim, emb_dim))

        self.eps = self.create_parameter(
            shape=[1, 1],
            dtype='float32',
            default_initializer=nn.initializer.Constant(value=0))

        if self.with_efeat:
            self.bond_encoder = getattr(ME, self.config.bond_enc_type,
                                        ME.BondEncoder)(emb_dim=emb_dim)
Example #10
 def __init__(self, z_dim, channels_img, features_g):
     super(Generator, self).__init__()
     self.gen = nn.Sequential(
         # Input: N x z_dim x 1 x 1
         self._block(z_dim, features_g * 16, 4, 1, 0),           # N x f_g*16 x 4 x 4
         self._block(features_g * 16, features_g * 8, 4, 2, 1),  # N x f_g*8 x 8 x 8
         self._block(features_g * 8, features_g * 4, 4, 2, 1),   # N x f_g*4 x 16 x 16
         self._block(features_g * 4, features_g * 2, 4, 2, 1),   # N x f_g*2 x 32 x 32
         nn.Conv2DTranspose(
             features_g * 2, channels_img, kernel_size=4, stride=2, padding=1,
             bias_attr=False,
             weight_attr=paddle.ParamAttr(initializer=conv_initializer())),
         nn.Tanh()  # output in [-1, 1]: N x channels_img x 64 x 64
     )
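A usage sketch (the sizes are illustrative; _block and conv_initializer are defined elsewhere in the original source):

gen = Generator(z_dim=100, channels_img=3, features_g=64)
fake = gen.gen(paddle.randn([8, 100, 1, 1]))  # -> [8, 3, 64, 64]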
Example #11
 def __init__(self, dim_in, dim_out):
     super(StyleResidualBlock, self).__init__()
     self.block1 = nn.Sequential(
         nn.Conv2D(dim_in,
                   dim_out,
                   kernel_size=3,
                   stride=1,
                   padding=1,
                   bias_attr=False), PONO())
     ks = 3
     pw = ks // 2
     self.beta1 = nn.Conv2D(dim_in, dim_out, kernel_size=ks, padding=pw)
     self.gamma1 = nn.Conv2D(dim_in, dim_out, kernel_size=ks, padding=pw)
     self.block2 = nn.Sequential(
         nn.ReLU(),
         nn.Conv2D(dim_out,
                   dim_out,
                   kernel_size=3,
                   stride=1,
                   padding=1,
                   bias_attr=False), PONO())
     self.beta2 = nn.Conv2D(dim_in, dim_out, kernel_size=ks, padding=pw)
     self.gamma2 = nn.Conv2D(dim_in, dim_out, kernel_size=ks, padding=pw)
Example #12
def export(args):
    model = paddlevision.models.__dict__[args.model](
        pretrained=args.pretrained, num_classes=args.num_classes)
    model = nn.Sequential(model, nn.Softmax())
    model.eval()

    model = paddle.jit.to_static(
        model,
        input_spec=[
            InputSpec(shape=[None, 3, args.img_size, args.img_size],
                      dtype='float32')
        ])
    paddle.jit.save(model, os.path.join(args.save_inference_dir, "inference"))
    print(f"inference model has been saved into {args.save_inference_dir}")
Example #13
    def __init__(self,
                 cfg,
                 ch_in,
                 ch_out,
                 act,
                 norm_type,
                 name,
                 data_format='NCHW'):
        """
        PPYOLODetBlockCSP layer

        Args:
            cfg (list): layer configs for this block
            ch_in (int): input channel
            ch_out (int): output channel
            act (str): activation function, default is 'mish'
            norm_type (str): normalization type
            name (str): block name
            data_format (str): data format, NCHW or NHWC
        """
        super(PPYOLODetBlockCSP, self).__init__()
        self.data_format = data_format
        self.conv1 = ConvBNLayer(ch_in,
                                 ch_out,
                                 1,
                                 padding=0,
                                 act=act,
                                 norm_type=norm_type,
                                 name=name + '.left',
                                 data_format=data_format)
        self.conv2 = ConvBNLayer(ch_in,
                                 ch_out,
                                 1,
                                 padding=0,
                                 act=act,
                                 norm_type=norm_type,
                                 name=name + '.right',
                                 data_format=data_format)
        self.conv3 = ConvBNLayer(ch_out * 2,
                                 ch_out * 2,
                                 1,
                                 padding=0,
                                 act=act,
                                 norm_type=norm_type,
                                 name=name,
                                 data_format=data_format)
        self.conv_module = nn.Sequential()
        for idx, (layer_name, layer, args, kwargs) in enumerate(cfg):
            kwargs.update(name=name + layer_name, data_format=data_format)
            self.conv_module.add_sublayer(layer_name, layer(*args, **kwargs))
Example #14
    def __init__(self,
                 aspp_ratios,
                 in_channels,
                 out_channels,
                 align_corners,
                 use_sep_conv=False,
                 image_pooling=False):
        super().__init__()

        self.align_corners = align_corners
        self.aspp_blocks = nn.LayerList()

        for ratio in aspp_ratios:
            if use_sep_conv and ratio > 1:
                conv_func = layers.SeparableConvBNReLU
            else:
                conv_func = layers.ConvBNReLU

            block = conv_func(in_channels=in_channels,
                              out_channels=out_channels,
                              kernel_size=1 if ratio == 1 else 3,
                              dilation=ratio,
                              padding=0 if ratio == 1 else ratio)
            self.aspp_blocks.append(block)

        out_size = len(self.aspp_blocks)

        if image_pooling:
            self.global_avg_pool = nn.Sequential(
                nn.AdaptiveAvgPool2D(output_size=(1, 1)),
                layers.ConvBNReLU(in_channels,
                                  out_channels,
                                  kernel_size=1,
                                  bias_attr=False))
            out_size += 1
        self.image_pooling = image_pooling

        self.edge_conv = layers.ConvBNReLU(1,
                                           out_channels,
                                           kernel_size=1,
                                           bias_attr=False)
        out_size += 1

        self.conv_bn_relu = layers.ConvBNReLU(
            in_channels=out_channels * out_size,
            out_channels=out_channels,
            kernel_size=1)

        self.dropout = nn.Dropout(p=0.1)  # drop rate
Example #15
    def __init__(self, backbone, output_stride, BatchNorm):
        super(ASPP, self).__init__()
        if backbone == 'drn':
            inplanes = 512
        elif backbone == 'mobilenet':
            inplanes = 320
        else:
            inplanes = 2048
        if output_stride == 16:
            dilations = [1, 6, 12, 18]
        elif output_stride == 8:
            dilations = [1, 12, 24, 36]
        else:
            raise NotImplementedError

        self.aspp1 = _ASPPModule(inplanes,
                                 256,
                                 1,
                                 padding=0,
                                 dilation=dilations[0],
                                 BatchNorm=BatchNorm)
        self.aspp2 = _ASPPModule(inplanes,
                                 256,
                                 3,
                                 padding=dilations[1],
                                 dilation=dilations[1],
                                 BatchNorm=BatchNorm)
        self.aspp3 = _ASPPModule(inplanes,
                                 256,
                                 3,
                                 padding=dilations[2],
                                 dilation=dilations[2],
                                 BatchNorm=BatchNorm)
        self.aspp4 = _ASPPModule(inplanes,
                                 256,
                                 3,
                                 padding=dilations[3],
                                 dilation=dilations[3],
                                 BatchNorm=BatchNorm)

        self.global_avg_pool = nn.Sequential(
            nn.AdaptiveAvgPool2D((1, 1)),
            nn.Conv2D(inplanes, 256, 1, stride=1, bias_attr=False),
            BatchNorm(256), nn.ReLU())
        self.conv1 = nn.Conv2D(1280, 256, 1, bias_attr=False)
        self.bn1 = BatchNorm(256)
        self.relu = nn.ReLU()  # paddle.nn.ReLU takes no inplace flag
        self.dropout = nn.Dropout(0.1)
        self._init_weight()
Example #16
    def __init__(self,
                 ch_in,
                 ch_out=64,
                 conv_num=2,
                 dcn_head=False,
                 lite_head=False,
                 norm_type='bn'):
        super(WHHead, self).__init__()
        head_conv = nn.Sequential()
        for i in range(conv_num):
            name = 'conv.{}'.format(i)
            if lite_head:
                lite_name = 'wh.' + name
                head_conv.add_sublayer(
                    lite_name,
                    LiteConv(in_channels=ch_in if i == 0 else ch_out,
                             out_channels=ch_out,
                             norm_type=norm_type))
                head_conv.add_sublayer(lite_name + '.act', nn.ReLU6())
            else:
                if dcn_head:
                    head_conv.add_sublayer(
                        name,
                        DeformableConvV2(
                            in_channels=ch_in if i == 0 else ch_out,
                            out_channels=ch_out,
                            kernel_size=3,
                            weight_attr=ParamAttr(
                                initializer=Normal(0, 0.01))))
                else:
                    head_conv.add_sublayer(
                        name,
                        nn.Conv2D(
                            in_channels=ch_in if i == 0 else ch_out,
                            out_channels=ch_out,
                            kernel_size=3,
                            padding=1,
                            weight_attr=ParamAttr(initializer=Normal(0, 0.01)),
                            bias_attr=ParamAttr(learning_rate=2.,
                                                regularizer=L2Decay(0.))))
                head_conv.add_sublayer(name + '.act', nn.ReLU())

        self.feat = head_conv
        self.head = nn.Conv2D(
            in_channels=ch_out,
            out_channels=4,
            kernel_size=1,
            weight_attr=ParamAttr(initializer=Normal(0, 0.001)),
            bias_attr=ParamAttr(learning_rate=2., regularizer=L2Decay(0.)))
Example #17
    def build_conv_block(self, dim, padding_type, norm_layer, use_dropout,
                         use_bias):
        """Construct a convolutional block.

        Parameters:
            dim (int)           -- the number of channels in the conv layer.
            padding_type (str)  -- the name of padding layer: reflect | replicate | zero
            norm_layer          -- normalization layer
            use_dropout (bool)  -- whether to use dropout layers
            use_bias (bool)     -- whether the conv layer uses bias

        Returns a conv block (with a conv layer, a normalization layer, and a non-linearity layer (ReLU))
        """
        conv_block = []
        p = 0
        if padding_type == 'reflect':
            conv_block += [ReflectionPad2d(1)]
        elif padding_type == 'replicate':
            conv_block += [ReplicationPad2d(1)]
        elif padding_type == 'zero':
            p = 1
        else:
            raise NotImplementedError('padding [%s] is not implemented' %
                                      padding_type)

        conv_block += [
            nn.Conv2D(dim, dim, kernel_size=3, padding=p, bias_attr=use_bias),
            norm_layer(dim),
            nn.ReLU()
        ]
        if use_dropout:
            conv_block += [Dropout(0.5)]

        p = 0
        if padding_type == 'reflect':
            conv_block += [ReflectionPad2d(1)]
        elif padding_type == 'replicate':
            conv_block += [ReplicationPad2d(1)]
        elif padding_type == 'zero':
            p = 1
        else:
            raise NotImplementedError('padding [%s] is not implemented' %
                                      padding_type)
        conv_block += [
            nn.Conv2D(dim, dim, kernel_size=3, padding=p, bias_attr=use_bias),
            norm_layer(dim)
        ]

        return nn.Sequential(*conv_block)
Example #18
    def __init__(self,
                 in_channel=256,
                 out_channel=256,
                 num_convs=4,
                 norm_type=None):
        super(MaskFeat, self).__init__()
        self.num_convs = num_convs
        self.in_channel = in_channel
        self.out_channel = out_channel
        self.norm_type = norm_type
        fan_conv = out_channel * 3 * 3
        fan_deconv = out_channel * 2 * 2

        mask_conv = nn.Sequential()
        if norm_type == 'gn':
            for i in range(self.num_convs):
                conv_name = 'mask_inter_feat_{}'.format(i + 1)
                mask_conv.add_sublayer(
                    conv_name,
                    ConvNormLayer(ch_in=in_channel if i == 0 else out_channel,
                                  ch_out=out_channel,
                                  filter_size=3,
                                  stride=1,
                                  norm_type=self.norm_type,
                                  initializer=KaimingNormal(fan_in=fan_conv)))
                mask_conv.add_sublayer(conv_name + 'act', nn.ReLU())
        else:
            for i in range(self.num_convs):
                conv_name = 'mask_inter_feat_{}'.format(i + 1)
                conv = nn.Conv2D(
                    in_channels=in_channel if i == 0 else out_channel,
                    out_channels=out_channel,
                    kernel_size=3,
                    padding=1,
                    weight_attr=paddle.ParamAttr(initializer=KaimingNormal(
                        fan_in=fan_conv)))
                mask_conv.add_sublayer(conv_name, conv)
                mask_conv.add_sublayer(conv_name + 'act', nn.ReLU())
        mask_conv.add_sublayer(
            'conv5_mask',
            nn.Conv2DTranspose(
                in_channels=self.in_channel,
                out_channels=self.out_channel,
                kernel_size=2,
                stride=2,
                weight_attr=paddle.ParamAttr(initializer=KaimingNormal(
                    fan_in=fan_deconv))))
        mask_conv.add_sublayer('conv5_mask' + 'act', nn.ReLU())
        self.upsample = mask_conv
Example #19
    def __init__(self, num_filters, has_se=False):
        super().__init__()

        self.basic_block_list = nn.LayerList()

        for i in range(len(num_filters)):
            self.basic_block_list.append(
                nn.Sequential(*[
                    BasicBlock(num_channels=num_filters[i],
                               num_filters=num_filters[i],
                               has_se=has_se) for _ in range(4)
                ]))

        self.fuse_func = FuseLayers(in_channels=num_filters,
                                    out_channels=num_filters)
Example #20
 def __init__(self, channels, kernel_size, stride):
     super(Involution, self).__init__()
     self.kernel_size = kernel_size
     self.stride = stride
     self.channels = channels
     reduction_ratio = 4
     self.group_channels = 16
     self.groups = self.channels // self.group_channels
     self.conv1 = nn.Sequential(
         ('conv',
          nn.Conv2D(in_channels=channels,
                    out_channels=channels // reduction_ratio,
                    kernel_size=1,
                    bias_attr=False)),
         ('bn', nn.BatchNorm2D(channels // reduction_ratio)),
         ('activate', nn.ReLU()))
     self.conv2 = nn.Sequential(
         ('conv',
          nn.Conv2D(in_channels=channels // reduction_ratio,
                    out_channels=kernel_size**2 * self.groups,
                    kernel_size=1,
                    stride=1)))
     if stride > 1:
         self.avgpool = nn.AvgPool2D(stride, stride)
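This example relies on paddle.nn.Sequential accepting (name, layer) tuples, which registers each sublayer under that name; a small illustrative sketch:

seq = nn.Sequential(('conv', nn.Conv2D(3, 8, 1)), ('act', nn.ReLU()))
print(seq.conv)  # named sublayers are reachable as attributes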
Example #21
 def _make_stage(self, planes, num_blocks, stride):
     strides = [stride] + [1] * (num_blocks - 1)
     blocks = []
     for stride in strides:
         cur_groups = self.override_groups_map.get(self.cur_layer_idx, 1)
         blocks.append(
             RepVGGBlock(in_channels=self.in_planes,
                         out_channels=planes,
                         kernel_size=3,
                         stride=stride,
                         padding=1,
                         groups=cur_groups))
         self.in_planes = planes
         self.cur_layer_idx += 1
     return nn.Sequential(*blocks)
Example #22
    def __init__(self,
                 in_channels,
                 hid_channels,
                 out_channels,
                 with_avg_pool=True):
        super(NonLinearNeckV1, self).__init__()
        self.with_avg_pool = with_avg_pool
        if with_avg_pool:
            self.avgpool = nn.AdaptiveAvgPool2D((1, 1))

        self.mlp = nn.Sequential(nn.Linear(in_channels, hid_channels),
                                 nn.ReLU(),
                                 nn.Linear(hid_channels, out_channels))

        init_backbone_weight(self.mlp)
Example #23
 def __init__(self, in_c, out_c, spatial):
     super(GradualStyleBlock, self).__init__()
     self.out_c = out_c
     self.spatial = spatial
     num_pools = int(np.log2(spatial))
     modules = []
     modules += [nn.Conv2D(in_c, out_c, kernel_size=3, stride=2, padding=1),
                 nn.LeakyReLU()]
     for i in range(num_pools - 1):
         modules += [
             nn.Conv2D(out_c, out_c, kernel_size=3, stride=2, padding=1),
             nn.LeakyReLU()
         ]
     self.convs = nn.Sequential(*modules)
     self.linear = EqualLinear(out_c, out_c, lr_mul=1)
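With num_pools = log2(spatial) stride-2 convolutions in total, the feature map collapses to 1x1; a forward sketch under that assumption:

 def forward(self, x):
     x = self.convs(x)                # [N, out_c, 1, 1] after log2(spatial) halvings
     x = x.reshape([-1, self.out_c])  # flatten to [N, out_c]
     return self.linear(x)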
Example #24
 def _make_layers(self,
                  in_dims,
                  out_dims,
                  kernel_size,
                  num_groups,
                  weight_attr=None,
                  bias_attr=None):
     return nn.Sequential(
         nn.Conv2D(in_dims,
                   out_dims,
                   kernel_size,
                   padding=kernel_size // 2,
                   weight_attr=weight_attr,
                   bias_attr=bias_attr), nn.GroupNorm(num_groups, out_dims),
         nn.ReLU())
Example #25
    def __init__(self, n_in, n_out, n_layers):
        super(DecoderBlock, self).__init__()
        n_hid = n_out // 4
        self.post_gain = 1 / (n_layers**2)

        self.id_path = nn.Conv2D(n_in, n_out,
                                 1) if n_in != n_out else Identity()
        self.res_path = nn.Sequential(
            ('relu_1', nn.ReLU()), ('conv_1', nn.Conv2D(n_in, n_hid, 1)),
            ('relu_2', nn.ReLU()),
            ('conv_2', nn.Conv2D(n_hid, n_hid, 3, padding=1)),
            ('relu_3', nn.ReLU()),
            ('conv_3', nn.Conv2D(n_hid, n_hid, 3, padding=1)),
            ('relu_4', nn.ReLU()),
            ('conv_4', nn.Conv2D(n_hid, n_out, 3, padding=1)))
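The forward pass is omitted; a sketch of the usual residual wiring, with the res_path output scaled by post_gain (an assumption suggested by the 1 / n_layers**2 factor):

    def forward(self, x):
        return self.id_path(x) + self.post_gain * self.res_path(x)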
Example #26
    def _make_layer(self, block, inplanes, planes, blocks, stride=1):
        downsample = None
        if stride != 1 or inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2D(inplanes,
                          planes * block.expansion,
                          kernel_size=1,
                          stride=stride,
                          bias_attr=False),
                self.norm_layer(planes * block.expansion),
            )

        layers = []
        layers.append(
            block(inplanes,
                  planes,
                  stride,
                  downsample=downsample,
                  norm_layer=self.norm_layer))
        inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(inplanes, planes, norm_layer=self.norm_layer))

        return nn.Sequential(*layers)
Example #27
def DWConvLayer(in_channels,
                out_channels,
                kernel_size=3,
                stride=1,
                bias_attr=False):
    layer = nn.Sequential(('dwconv',
                           nn.Conv2D(in_channels,
                                     out_channels,
                                     kernel_size=kernel_size,
                                     stride=stride,
                                     padding=1,
                                     groups=out_channels,
                                     bias_attr=bias_attr)),
                          ('norm', nn.BatchNorm2D(out_channels)))
    return layer
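Because groups=out_channels, the convolution is depthwise only when in_channels equals out_channels; a usage sketch under that assumption:

dw = DWConvLayer(32, 32, kernel_size=3, stride=2)
y = dw(paddle.rand([1, 32, 56, 56]))  # -> [1, 32, 28, 28]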
Example #28
 def __init__(self, inp, oup, kernel, stride):
     super(ConvDw, self).__init__()
     self.conv = nn.Sequential(
         nn.Conv2D(inp,
                   inp,
                   kernel,
                   stride, (kernel - 1) // 2,
                   groups=inp,
                   bias_attr=False),
         nn.BatchNorm2D(num_features=inp, epsilon=1e-05, momentum=0.1),
         nn.ReLU(),
         nn.Conv2D(inp, oup, 1, 1, 0, bias_attr=False),
         nn.BatchNorm2D(num_features=oup, epsilon=1e-05, momentum=0.1),
         nn.ReLU(),
     )
Example #29
    def __init__(self, channel, layer_idx):
        super(GELayer, self).__init__()

        # Kernel size w.r.t each layer for global depth-wise convolution
        kernel_size = [-1, 56, 28, 14, 7][layer_idx]

        self.conv = nn.Sequential(
            nn.Conv2D(channel,
                      channel,
                      kernel_size=kernel_size,
                      groups=channel),
            nn.BatchNorm2D(channel),
        )

        self.activation = nn.Sigmoid()
Example #30
    def _make_one_branch(self, branch_index, block, num_blocks, num_channels,
                         stride=1):
        downsample = None
        if stride != 1 or \
                self.num_inchannels[branch_index] != num_channels[branch_index] * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2D(self.num_inchannels[branch_index],
                          num_channels[branch_index] * block.expansion,
                          kernel_size=1, stride=stride, bias_attr=False),
                self.norm_layer(num_channels[branch_index] * block.expansion),
            )

        layers = []
        layers.append(block(self.num_inchannels[branch_index],
                            num_channels[branch_index], stride,
                            downsample=downsample, norm_layer=self.norm_layer))
        self.num_inchannels[branch_index] = \
            num_channels[branch_index] * block.expansion
        for i in range(1, num_blocks[branch_index]):
            layers.append(block(self.num_inchannels[branch_index],
                                num_channels[branch_index],
                                norm_layer=self.norm_layer))

        return nn.Sequential(*layers)