Пример #1
0
    def __init__(self, backbone, in_channels_list, num_classes, f_channels_list=None, up_mode='deconv', dropout=0):
        """Decoder head with three top-down stages and four side outputs.

        Args:
            backbone: feature extractor; expected to yield 4 feature maps
                whose channel counts are `in_channels_list` (c0..c3).
            in_channels_list: channels of the backbone feature maps.
            num_classes: number of prediction classes.
            f_channels_list: output channels of the 3 decoder stages;
                defaults to the first three entries of `in_channels_list`.
            up_mode: 'deconv' (transposed conv) or 'upsample' (interpolation).
            dropout: Dropout2d probability applied before `self.pred`.
        """
        super().__init__()
        self.backbone = backbone
        if up_mode == 'deconv':
            decoder_block = DecoderDeconvBlock
        elif up_mode == 'upsample':
            decoder_block = DecoderUpsamplingBlock
        else:
            # Report the offending value instead of raising a bare ValueError.
            raise ValueError(
                "up_mode must be 'deconv' or 'upsample', got %r" % (up_mode,))

        if f_channels_list is None:
            f_channels_list = in_channels_list[:3]

        # Top-down decoding: block2 fuses the two deepest maps first.
        self.block2 = decoder_block(in_channels_list[2], in_channels_list[3], f_channels_list[2])
        self.block1 = decoder_block(in_channels_list[1], in_channels_list[2], f_channels_list[1])
        self.block0 = decoder_block(in_channels_list[0], in_channels_list[1], f_channels_list[0])

        # 1-channel side outputs from each decoder stage; the deepest map
        # predicts all classes directly.
        self.side0 = Conv2d(f_channels_list[0], 1, 1,
                            norm='default', act='default')
        self.side1 = Conv2d(f_channels_list[1], 1, 1,
                            norm='default', act='default')
        self.side2 = Conv2d(f_channels_list[2], 1, 1,
                            norm='default', act='default')
        self.side3 = Conv2d(in_channels_list[3], num_classes, 1)

        self.dropout = nn.Dropout2d(dropout)
        # Grouped 1x1: fuses the 4 stacked maps per class independently.
        self.pred = nn.Conv2d(4 * num_classes, num_classes, 1, groups=num_classes)
Пример #2
0
 def __init__(self, in_channels, f_channels=256, num_scales=6, no_padding=(-1, 0), lite=False):
     """Multi-scale feature pyramid: one lateral/out conv pair per scale plus
     stride-2 down convs between scales.

     Args:
         in_channels: channels of the input feature map.
         f_channels: working width of every pyramid level.
         num_scales: number of pyramid levels (len(lat) == len(out) == num_scales,
             len(down) == num_scales - 1).
         no_padding: pair controlling how many of the deepest down convs lose
             their height / width padding (negative counts from the end).
         lite: use depthwise-separable convs for down and lateral layers.
     """
     super().__init__()
     self.num_scales = num_scales
     self.down = nn.ModuleList([])
     # First lateral: project input to f_channels, or pass through unchanged.
     self.lat = nn.ModuleList([
         Conv2d(in_channels, f_channels, kernel_size=3, stride=1,
                norm_layer='default', activation='default', depthwise_separable=lite)
         if in_channels != f_channels else Identity()
     ])
     self.out = nn.ModuleList([
         Conv2d(f_channels, f_channels // 2, kernel_size=1, stride=1,
                norm_layer='default', activation='default')
     ])
     # One (down, lat, out) triple for each additional scale.
     for i in range(num_scales - 1):
         self.down.append(Conv2d(in_channels, f_channels, kernel_size=3, stride=2,
                                 norm_layer='default', activation='default', depthwise_separable=lite))
         self.lat.append(Conv2d(f_channels, f_channels, kernel_size=3, stride=1,
                                norm_layer='default', activation='default', depthwise_separable=lite))
         self.out.append(Conv2d(f_channels, f_channels // 2, kernel_size=1, stride=1,
                                norm_layer='default', activation='default'))
         in_channels = f_channels
     no_padding = tuplify(no_padding, 2)
     # Strip vertical padding from the last |no_padding[0]| down convs
     # (negative i indexes self.down from the end). With `lite` the
     # separable conv is itself a sequence, so unwrap one more level to
     # reach the raw conv layer.
     for i in range(no_padding[0], 0):
         l = self.down[i][0]
         if lite:
             l = l[0]
         p = l.padding
         l.padding = (0, p[1])
     # Same for horizontal padding on the last |no_padding[1]| down convs.
     for i in range(no_padding[1], 0):
         l = self.down[i][0]
         if lite:
             l = l[0]
         p = l.padding
         l.padding = (p[0], 0)
Пример #3
0
    def __init__(self,
                 in_channels,
                 channels,
                 out_channels,
                 kernel_size,
                 stride,
                 activation='relu6',
                 with_se=True):
        """Inverted residual block: expand -> depthwise -> (SE) -> project."""
        super().__init__()
        self.with_se = with_se
        # Pointwise expansion, skipped when no widening is needed.
        self.expand = (
            Conv2d(in_channels, channels, kernel_size=1,
                   norm_layer='default', activation=activation)
            if in_channels != channels else Identity())

        self.dwconv = Conv2d(channels, channels, kernel_size, stride,
                             groups=channels, norm_layer='default',
                             activation=activation)

        if self.with_se:
            self.se = SELayerM(channels, 4)

        # Linear projection back to out_channels (no activation).
        self.project = Conv2d(channels, out_channels, kernel_size=1,
                              norm_layer='default')
        self.use_res_connect = stride == 1 and in_channels == out_channels
    def __init__(self, mult=0.5, num_classes=1000, with_se=False):
        """Classifier backbone: stem conv, maxpool, 3 stages, 1x1 conv, fc.

        `mult` selects a channel configuration from `self.cfg`.
        """
        super().__init__()
        num_layers = [4, 8, 4]
        channels = self.cfg[mult]
        self.num_layers = num_layers
        self.out_channels = channels
        block = SEBlock if with_se else BasicBlock

        self.conv1 = Conv2d(3, channels[0], kernel_size=3, stride=2,
                            norm_layer='default', activation='default')
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # Each stage moves to the next channel width in the config.
        self.stage2 = self._make_layer(block, num_layers[0], channels[0], channels[1])
        self.stage3 = self._make_layer(block, num_layers[1], channels[1], channels[2])
        self.stage4 = self._make_layer(block, num_layers[2], channels[2], channels[3])
        self.conv5 = Conv2d(channels[3], channels[4], kernel_size=1)

        self.avgpool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Linear(channels[4], num_classes)
Пример #5
0
    def __init__(self, num_classes=10, width_mult=1.0):
        """MobileNetV3-style network scaled by `width_mult`."""
        super().__init__()
        block = InvertedResidual
        in_channels = 16
        last_channels = 1280
        # Columns: kernel, expansion width, output width, use-SE, nonlinearity, stride.
        inverted_residual_setting = [
            [3, 16, 16, False, 'relu6', 1],
            [3, 64, 24, False, 'relu6', 1],
            [3, 72, 24, False, 'relu6', 1],
            [5, 72, 40, True, 'relu6', 1],
            [5, 120, 40, True, 'relu6', 1],
            [5, 120, 40, True, 'relu6', 1],
            [3, 240, 80, False, 'hswish', 2],
            [3, 200, 80, False, 'hswish', 1],
            [3, 184, 80, False, 'hswish', 1],
            [3, 184, 80, False, 'hswish', 1],
            [3, 480, 112, True, 'hswish', 1],
            [3, 672, 112, True, 'hswish', 1],
            [5, 672, 160, True, 'hswish', 2],
            [5, 960, 160, True, 'hswish', 1],
            [5, 960, 160, True, 'hswish', 1],
        ]

        # The head width only grows with the multiplier, never shrinks.
        if width_mult > 1.0:
            last_channels = _make_divisible(last_channels * width_mult)

        # Stem.
        features = [Conv2d(3, in_channels, kernel_size=3, stride=1,
                           norm='default', act='hswish')]
        # Bottleneck stack.
        for k, exp, c, se, nl, s in inverted_residual_setting:
            out_channels = _make_divisible(c * width_mult)
            exp_channels = _make_divisible(exp * width_mult)
            features.append(
                block(in_channels, exp_channels, out_channels, k, s, nl, se))
            in_channels = out_channels
        # Final pointwise conv reuses the last bottleneck's expansion width.
        features.append(Conv2d(in_channels, exp_channels, kernel_size=1,
                               norm='default', act='hswish'))
        in_channels = exp_channels
        self.features = nn.Sequential(*features)

        # Classifier: global pool then two 1x1 convs.
        self.classifier = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            Conv2d(in_channels, last_channels, kernel_size=1, act='hswish'),
            Conv2d(last_channels, num_classes, kernel_size=1))
Пример #6
0
 def __init__(self, C_in, C_out):
     """Factorized reduction: two stride-2 1x1 convs, each producing half of
     C_out, followed by a shared norm layer."""
     super().__init__()
     assert C_out % 2 == 0
     half = C_out // 2
     self.relu = nn.ReLU(inplace=False)
     self.conv_1 = Conv2d(C_in, half, 1, stride=2, bias=False)
     self.conv_2 = Conv2d(C_in, half, 1, stride=2, bias=False)
     self.bn = get_norm_layer(C_out)
 def __init__(self, in_channels, shuffle_groups=2):
     """Shuffle unit branch over half the input channels:
     1x1 -> depthwise 3x3 -> 1x1, then a channel shuffle."""
     super().__init__()
     channels = in_channels // 2
     self.conv1 = Conv2d(channels, channels, kernel_size=1,
                         norm_layer='default', activation='default')
     # Depthwise conv (groups == channels); no activation here.
     self.conv2 = Conv2d(channels, channels, kernel_size=3,
                         groups=channels, norm_layer='default')
     self.conv3 = Conv2d(channels, channels, kernel_size=1,
                         norm_layer='default', activation='default')
     self.shuffle = ShuffleBlock(shuffle_groups)
Пример #8
0
 def __init__(self, in_channels, out_channels, shuffle_groups=2):
     """Shuffle unit with SE whose branch width tops the input half up to
     out_channels after concatenation."""
     super().__init__()
     channels = out_channels - in_channels // 2
     self.conv1 = Conv2d(in_channels // 2, channels, kernel_size=1,
                         norm='default', act='default')
     # Depthwise 3x3 (groups == channels), no activation.
     self.conv2 = Conv2d(channels, channels, kernel_size=3,
                         groups=channels, norm='default')
     self.conv3 = Conv2d(channels, channels, kernel_size=1,
                         norm='default')
     self.shortcut = nn.Sequential()
     self.se = SEModule(channels, reduction=2)
     if in_channels != out_channels:
         # Project the shortcut half when widths differ.
         self.shortcut = Conv2d(in_channels // 2, channels, kernel_size=1,
                                norm='default')
     self.relu = Act('default')
     self.shuffle = ShuffleBlock(shuffle_groups)
Пример #9
0
    def __init__(self,
                 in_channels,
                 channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 se_ratio=1 / 16):
        """Pre-norm inverted residual: norm -> expand -> depthwise -> SE -> project."""
        super().__init__()

        self.bn = get_norm_layer('default', in_channels)
        # The expansion conv exists only when widening is required; the
        # attribute is deliberately left unset otherwise.
        if in_channels != channels:
            self.expand = Conv2d(in_channels, channels, kernel_size=1,
                                 norm_layer='default', activation='default')

        self.dwconv = Conv2d(channels, channels, kernel_size, stride=stride,
                             groups=channels, norm_layer='default',
                             activation='default')

        if se_ratio:
            assert 0 < se_ratio < 1
            self.se = SEModule(channels, reduction=int(1 / se_ratio))

        # Projection is optional: callers may pass out_channels=None.
        if out_channels is not None:
            self.project = Conv2d(channels, out_channels, kernel_size=1,
                                  norm_layer='default')
        self.use_res_connect = stride == 1 and in_channels == out_channels
Пример #10
0
 def __init__(self, in_channels, shuffle_groups=2, with_se=False):
     """Shuffle unit branch over half the channels using a depthwise 5x5,
     with an optional SE module before the channel shuffle."""
     super().__init__()
     self.with_se = with_se
     channels = in_channels // 2
     self.conv1 = Conv2d(channels, channels, kernel_size=1,
                         norm_layer='default', activation='default')
     # Depthwise 5x5 (groups == channels); no activation.
     self.conv2 = Conv2d(channels, channels, kernel_size=5,
                         groups=channels, norm_layer='default')
     self.conv3 = Conv2d(channels, channels, kernel_size=1,
                         norm_layer='default', activation='default')
     if with_se:
         self.se = SEModule(channels, reduction=8)
     self.shuffle = ShuffleBlock(shuffle_groups)
Пример #11
0
 def __init__(self, in_channels, out_channels, residual=True):
     """Darknet-style block: 1x1 squeeze to half width, then 3x3 expand."""
     super().__init__()
     self.residual = residual
     mid_channels = out_channels // 2
     self.conv1 = Conv2d(in_channels, mid_channels, kernel_size=1,
                         norm_layer='default', activation='leaky_relu')
     self.conv2 = Conv2d(mid_channels, out_channels, kernel_size=3,
                         norm_layer='default', activation='leaky_relu')
Пример #12
0
    def __init__(self, num_classes=1000):
        """Classification backbone: stem, three (down, stage) pairs,
        1x1 conv, global pool and fc."""
        super().__init__()
        num_layers = [5, 5, 7]
        channels = [64, 32, 64, 128, 256, 128]
        self.num_layers = num_layers
        self.channels = channels

        self.conv1 = Conv2d(3, channels[0], kernel_size=3, stride=2,
                            norm_layer='default', activation='default')
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.stage21 = BasicBlock(channels[0], channels[1])
        self.stage22 = self._make_layer(num_layers[0], channels[1])
        self.stage31 = DownBlock(channels[1])
        self.stage32 = self._make_layer(num_layers[1], channels[2])
        self.stage41 = DownBlock(channels[2])
        self.stage42 = self._make_layer(num_layers[2], channels[3])
        self.stage51 = DownBlock(channels[3])
        self.stage52 = Conv2d(channels[4], channels[5], kernel_size=1,
                              norm_layer='default', activation='default')
        self.avgpool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Linear(channels[5], num_classes)
Пример #13
0
 def __init__(self, in_channels, out_channels, stride):
     """Bottleneck residual block: 1x1 reduce -> 3x3 (stride) -> 1x1 expand.

     Fix: the projection shortcut previously triggered only on
     `stride != 1`, so an identity shortcut was used even when
     `in_channels != out_channels` at stride 1, which breaks the residual
     addition. Now it also triggers on a channel change, matching the
     sibling bottleneck blocks in this file.
     """
     super().__init__()
     channels = out_channels // self.expansion
     self.conv1 = Conv2d(in_channels,
                         channels,
                         kernel_size=1,
                         norm_layer='default',
                         activation='default')
     self.conv2 = Conv2d(channels,
                         channels,
                         kernel_size=3,
                         stride=stride,
                         norm_layer='default',
                         activation='default')
     # Linear 1x1; self.relu is applied after the residual addition.
     self.conv3 = Conv2d(channels,
                         out_channels,
                         kernel_size=1,
                         norm_layer='default')
     # Project the identity branch whenever the shape or width changes.
     if stride != 1 or in_channels != out_channels:
         self.shortcut = Conv2d(in_channels,
                                out_channels,
                                kernel_size=1,
                                stride=stride,
                                norm_layer='default')
     else:
         self.shortcut = nn.Identity()
     self.relu = get_activation('default')
Пример #14
0
 def __init__(self, in_channels, use_se=False):
     """Shuffle unit branch packed into a single nn.Sequential over half the
     input channels, with an optional SE layer at the end."""
     super().__init__()
     assert in_channels % 2 == 0
     channels = in_channels // 2
     layers = [Conv2d(channels, channels, kernel_size=1,
                      activation='default', norm_layer='default')]
     # Depthwise 3x3, linear.
     layers.append(Conv2d(channels, channels, kernel_size=3, groups=channels,
                          activation=None, norm_layer='default'))
     # Pointwise, linear.
     layers.append(Conv2d(channels, channels, kernel_size=1,
                          activation=None, norm_layer='default'))
     if use_se:
         layers.append(SELayer(channels, reduction=2))
     self.branch = nn.Sequential(*layers)
     self.relu = get_activation()
Пример #15
0
 def __init__(self, in_channels, out_channels, last=False):
     """Stage of depthwise-separable 5x5 convs with SE; non-last stages also
     build a stride-2 transposed conv (`deconv1`)."""
     super().__init__()
     self.last = last
     self.conv1 = nn.Sequential(
         Conv2d(in_channels, out_channels, kernel_size=5,
                norm_layer='default', activation='relu',
                depthwise_separable=True),
         # Second conv is linear (no activation argument).
         Conv2d(out_channels, out_channels, kernel_size=5,
                norm_layer='default', depthwise_separable=True),
         SEModule(out_channels, reduction=4),
     )
     if not last:
         self.deconv1 = Conv2d(out_channels, out_channels, kernel_size=4,
                               stride=2, norm_layer='default',
                               depthwise_separable=True, transposed=True)
     self.nl1 = get_activation('default')
     self.conv2 = Conv2d(out_channels, out_channels, kernel_size=5,
                         norm_layer='default', activation='default',
                         depthwise_separable=True)
Пример #16
0
    def __init__(self, in_channels, out_channels, stride, groups, use_se):
        """Grouped residual block: 1x1 -> grouped 3x3 (stride) -> [SE] -> 1x1."""
        super().__init__()
        self.use_se = use_se

        self.conv1 = Conv2d(in_channels, out_channels, kernel_size=1,
                            norm_layer='default', activation='default')
        self.conv2 = Conv2d(out_channels, out_channels, kernel_size=3,
                            stride=stride, groups=groups,
                            norm_layer='default', activation='default')
        if self.use_se:
            self.se = SE(out_channels, 4)
        # Linear 1x1; self.relu fires after the residual addition.
        self.conv3 = Conv2d(out_channels, out_channels, kernel_size=1,
                            norm_layer='default')
        if stride == 1 and in_channels == out_channels:
            self.shortcut = nn.Identity()
        else:
            # Downsample with avg-pool first, then project with a 1x1 + norm.
            shortcut = []
            if stride != 1:
                shortcut.append(nn.AvgPool2d(kernel_size=(2, 2), stride=2))
            shortcut.append(
                Conv2d(in_channels, out_channels, kernel_size=1, bias=False))
            shortcut.append(get_norm_layer(out_channels))
            self.shortcut = nn.Sequential(*shortcut)
        self.relu = get_activation('default')
Пример #17
0
 def __init__(self,
              stem_channels,
              channels_per_stage,
              units_per_stage,
              final_channels,
              num_classes=10,
              use_se=True):
     """Three-stage classifier: stem conv, stacked BasicUnit stages,
     final 1x1 conv, global pool and fc."""
     super().__init__()
     self.stem = Conv2d(3, stem_channels, kernel_size=3,
                        act='default', norm='default')
     # block = ResUnit if residual else BasicUnit
     block = BasicUnit
     # Stage 1 is built with argument 1 where later stages get 2.
     self.stage1 = _make_layer(block, units_per_stage[0], stem_channels,
                               channels_per_stage[0], 1, use_se)
     self.stage2 = _make_layer(block, units_per_stage[1],
                               channels_per_stage[0], channels_per_stage[1],
                               2, use_se)
     self.stage3 = _make_layer(block, units_per_stage[2],
                               channels_per_stage[1], channels_per_stage[2],
                               2, use_se)
     self.final_block = Conv2d(channels_per_stage[2], final_channels,
                               kernel_size=1, act='default', norm='default')
     self.final_pool = nn.AdaptiveAvgPool2d(1)
     self.fc = nn.Linear(final_channels, num_classes)
Пример #18
0
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 expand_ratio,
                 se_ratio=0.25,
                 drop_connect=0.2):
        """MBConv block: expand -> depthwise -> SE -> project, as one Sequential."""
        super().__init__()

        channels = in_channels * expand_ratio
        self.drop_connect = drop_connect
        has_se = se_ratio is not None and 0 < se_ratio < 1

        # Build each stage as a named local, then chain them.
        expand = (Conv2d(in_channels, channels, kernel_size=1,
                         norm_layer='default', activation='swish')
                  if expand_ratio != 1 else Identity())
        dwconv = Conv2d(channels, channels, kernel_size, stride,
                        groups=channels, norm_layer='default',
                        activation='swish')
        # SE squeeze width is computed from the *input* channels.
        se = (SEModule(channels, int(in_channels * se_ratio))
              if has_se else Identity())
        project = Conv2d(channels, out_channels, kernel_size=1,
                         norm_layer='default')
        self.conv = nn.Sequential(expand, dwconv, se, project)
        self.use_res_connect = stride == 1 and in_channels == out_channels
Пример #19
0
def _make_head(f_channels, num_layers, out_channels, lite):
    """Build a prediction head: `num_layers` 3x3 conv-norm-act layers at width
    `f_channels`, followed by a plain 3x3 conv producing `out_channels`.

    Args:
        f_channels: width of the intermediate layers.
        num_layers: number of conv-norm-act layers before the output conv.
        out_channels: channels of the final prediction conv.
        lite: use depthwise-separable convs for the intermediate layers.

    Returns:
        nn.Sequential containing num_layers + 1 convolutions.
    """
    # Comprehension instead of a manual append loop (idiomatic; the layers
    # are identical, so the loop index was unused).
    layers = [
        Conv2d(f_channels, f_channels, kernel_size=3,
               norm_layer='default', activation='default',
               depthwise_separable=lite)
        for _ in range(num_layers)
    ]
    layers.append(Conv2d(f_channels, out_channels, kernel_size=3))
    return nn.Sequential(*layers)
Пример #20
0
    def __init__(self, num_anchors, num_classes, in_channels_list, focal_init=False, lite=False, large_kernel=False):
        """Per-level localization and classification heads for detection."""
        super().__init__()
        self.num_classes = num_classes
        num_anchors = tuplify(num_anchors, len(in_channels_list))
        kernel_size = 5 if (lite and large_kernel) else 3

        # Both head types share the same norm + conv shape; only the
        # output width differs.
        def head(c, out):
            return nn.Sequential(
                Norm("default", c),
                Conv2d(c, out, kernel_size=kernel_size,
                       depthwise_separable=lite, mid_norm_layer='default'))

        self.loc_heads = nn.ModuleList(
            [head(c, n * 4) for c, n in zip(in_channels_list, num_anchors)])
        self.cls_heads = nn.ModuleList(
            [head(c, n * num_classes)
             for c, n in zip(in_channels_list, num_anchors)])

        if focal_init:
            # Bias the class logits so the initial predicted probability
            # is sigmoid^-1(0.01), per focal-loss initialization.
            for p in self.cls_heads:
                get_last_conv(p).bias.data.fill_(inverse_sigmoid(0.01))
    def __init__(self, version, num_classes=1000, with_se=False):
        """ResNet-style backbone; `version` indexes (channels, num_layers)
        in `self.cfg`."""
        super().__init__()
        cfg = self.cfg[version]
        channels = cfg[0]
        num_layers = cfg[1]
        self.channels = channels
        self.num_layers = num_layers
        block = SEResBlock if with_se else ResBlock

        self.conv1 = Conv2d(3, channels[0], kernel_size=3, stride=2,
                            norm_layer='default', activation='default')
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # Stage 2 keeps stride 1; the rest use _make_layer's default stride.
        self.stage2 = self._make_layer(block, num_layers[0], channels[0],
                                       channels[1], stride=1)
        self.stage3 = self._make_layer(block, num_layers[1], channels[1],
                                       channels[2])
        self.stage4 = self._make_layer(block, num_layers[2], channels[2],
                                       channels[3])
        self.stage5 = self._make_layer(block, num_layers[3], channels[3],
                                       channels[4])
        self.conv6 = Conv2d(channels[4], channels[5], kernel_size=1)

        self.avgpool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Linear(channels[5], num_classes)
Пример #22
0
    def __init__(self, backbone, side_in_channels, num_classes, dropout=0):
        """Side-output prediction head over four backbone feature maps."""
        super().__init__()
        self.backbone = backbone
        # 1-channel side outputs for the three shallow feature maps.
        self.side1 = Conv2d(side_in_channels[0], 1, 1,
                            norm='default', act='default')
        self.side2 = Conv2d(side_in_channels[1], 1, 1,
                            norm='default', act='default')
        self.side3 = Conv2d(side_in_channels[2], 1, 1,
                            norm='default', act='default')
        # The deepest side output predicts all classes directly.
        self.side5 = Conv2d(side_in_channels[3], num_classes, 1)

        self.dropout = nn.Dropout2d(dropout)
        # Grouped 1x1: fuses the 4 stacked maps per class independently.
        self.pred = nn.Conv2d(4 * num_classes, num_classes, 1,
                              groups=num_classes)
 def __init__(self, in_channels, out_channels, shuffle_groups=2):
     """Shuffle unit whose branch width tops the input half up to
     out_channels after concatenation."""
     super().__init__()
     channels = out_channels - in_channels // 2
     self.conv1 = Conv2d(in_channels // 2, channels, kernel_size=1,
                         norm_layer='default', activation='default')
     # Depthwise 3x3 (groups == channels), no activation.
     self.conv2 = Conv2d(channels, channels, kernel_size=3,
                         groups=channels, norm_layer='default')
     self.conv3 = Conv2d(channels, channels, kernel_size=1,
                         norm_layer='default')
     # Identity shortcut unless the overall widths differ.
     if in_channels != out_channels:
         self.shortcut = Conv2d(in_channels // 2, channels, kernel_size=1,
                                norm_layer='default')
     else:
         self.shortcut = nn.Sequential()
     self.relu = get_activation('default')
     self.shuffle = ShuffleBlock(shuffle_groups)
Пример #24
0
    def __init__(self, num_classes=1000, version=49, **kwargs):
        """Backbone with three stages; `version` selects channels from
        `self.cfg`, `**kwargs` is forwarded to every conv/stage builder."""
        super().__init__()
        num_layers = [4, 8, 4]
        channels = self.cfg[version]
        self.num_layers = num_layers
        self.channels = channels

        self.conv1 = Conv2d(3, channels[0], kernel_size=3, stride=2,
                            act='default', **kwargs)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.stage2 = self._make_layer(
            num_layers[0], channels[0], channels[1], **kwargs)
        self.stage3 = self._make_layer(
            num_layers[1], channels[1], channels[2], **kwargs)
        self.stage4 = self._make_layer(
            num_layers[2], channels[2], channels[3], **kwargs)
        # An extra 1x1 conv exists only when the config provides a 5th width.
        if len(self.channels) == 5:
            self.conv5 = Conv2d(
                channels[3], channels[4], kernel_size=1, **kwargs)

        self.avgpool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Linear(channels[-1], num_classes)
Пример #25
0
 def __init__(self,
              in_channels1,
              in_channels2,
              f_channels,
              lite=False,
              aggregate='add'):
     """Top-down fusion: 1x1 lateral on the skip, stride-2 transposed conv on
     the deeper map, then a fuse conv after add/concat aggregation."""
     super().__init__()
     self.aggregate = aggregate
     self.lat = Conv2d(in_channels1, f_channels, kernel_size=1,
                       norm='default')
     self.deconv = Conv2d(in_channels2, f_channels, kernel_size=4, stride=2,
                          norm='default', depthwise_separable=lite,
                          transposed=True)
     # 'cat' doubles the fuse conv's input width.
     channels = f_channels * 2 if aggregate == 'cat' else f_channels
     self.conv = Conv2d(channels, f_channels,
                        kernel_size=5 if lite else 3,
                        norm='default', act='default',
                        depthwise_separable=lite)
Пример #26
0
 def __init__(self, in_channels, f_channels, last=False):
     """Decoder stage: conv-norm-act + linear conv, then act + conv-norm-act."""
     super().__init__()
     kernel_size = 3
     self.last = last
     self.conv1 = nn.Sequential(
         Conv2d(in_channels, f_channels, kernel_size=kernel_size,
                norm_layer='default', activation='default'),
         # Second conv is linear; conv2 begins with the activation.
         Conv2d(f_channels, f_channels, kernel_size=kernel_size,
                norm_layer='default'),
     )
     self.conv2 = nn.Sequential(
         get_activation('default'),
         Conv2d(f_channels, f_channels, kernel_size=kernel_size,
                norm_layer='default', activation='default'))
Пример #27
0
    def __init__(self, in_channels, out_channels, kernel_size, stride, expand_ratio, se_ratio=0.25, drop_rate=0.2):
        """MBConv assembled as a named nn.Sequential:
        [expand] -> dwconv -> [se] -> project -> [drop_path]."""
        super().__init__()

        channels = in_channels * expand_ratio
        use_se = se_ratio is not None and 0 < se_ratio < 1
        self.use_res_connect = stride == 1 and in_channels == out_channels

        layers = nn.Sequential()
        if expand_ratio != 1:
            layers.add_module(
                "expand",
                Conv2d(in_channels, channels, kernel_size=1,
                       norm_layer='default', activation='swish'))
        layers.add_module(
            "dwconv",
            Conv2d(channels, channels, kernel_size, stride, groups=channels,
                   norm_layer='default', activation='swish'))
        if use_se:
            # SE squeeze width is derived from the *input* channels.
            layers.add_module(
                "se", SEModule(channels, int(in_channels * se_ratio)))
        layers.add_module(
            "project",
            Conv2d(channels, out_channels, kernel_size=1,
                   norm_layer='default'))
        if self.use_res_connect and drop_rate:
            # Stochastic depth only makes sense on the residual path.
            layers.add_module("drop_path", DropPath(drop_rate))

        self.layers = layers
Пример #28
0
    def __init__(self, in_channels, out_channels, stride=1, expansion=4):
        """ResNet bottleneck: 1x1 reduce -> 3x3 (stride) -> 1x1 expand, with an
        optional projection downsample on the identity branch."""
        super().__init__()
        self.stride = stride
        self.in_channels = in_channels
        self.out_channels = out_channels
        channels = out_channels // expansion

        self.conv1 = Conv2d(in_channels, channels, kernel_size=1,
                            norm_layer='default', activation='default')
        self.conv2 = Conv2d(channels, channels, kernel_size=3, stride=stride,
                            norm_layer='default', activation='default')
        # Linear expansion; relu3 fires after the residual addition.
        self.conv3 = Conv2d(channels, out_channels, kernel_size=1,
                            norm_layer='default')
        self.relu3 = get_activation('default')

        if stride != 1 or in_channels != out_channels:
            self.downsample = Conv2d(in_channels, out_channels, kernel_size=1,
                                     stride=stride, norm_layer='default')
        else:
            self.downsample = None
Пример #29
0
    def __init__(self, in_channels, out_channels, stride, groups, base_width):
        """ResNeXt bottleneck: grouped 3x3 at width D*groups, where D scales
        with base_width / 64."""
        super().__init__()

        D = math.floor(out_channels // self.expansion * (base_width / 64))
        width = D * groups

        self.conv1 = Conv2d(in_channels, width, kernel_size=1,
                            norm_layer='default', activation='default')
        self.conv2 = Conv2d(width, width, kernel_size=3, stride=stride,
                            groups=groups, norm_layer='default',
                            activation='default')
        # Linear 1x1; self.relu is applied after the residual addition.
        self.conv3 = Conv2d(width, out_channels, kernel_size=1,
                            norm_layer='default')
        if stride != 1 or in_channels != out_channels:
            self.shortcut = Conv2d(in_channels, out_channels, kernel_size=1,
                                   stride=stride, norm_layer='default')
        else:
            self.shortcut = nn.Identity()
        self.relu = get_activation('default')
Пример #30
0
 def __init__(self, in_channels1, in_channels2, out_channels):
     """Merge block: bilinear 2x upsample, concat both inputs, then two
     3x3 conv-norm-act layers."""
     super().__init__()
     self.upsample = nn.Upsample(scale_factor=2, mode='bilinear',
                                 align_corners=False)
     self.conv1 = Conv2d(in_channels1 + in_channels2, out_channels,
                         kernel_size=3, norm='default', act='default')
     self.conv2 = Conv2d(out_channels, out_channels, kernel_size=3,
                         norm='default', act='default')