Example #1
    def __init__(self,
                 num_anchors,
                 num_classes,
                 in_channels_list,
                 focal_init=False,
                 lite=False,
                 large_kernel=False):
        super().__init__()
        self.num_classes = num_classes
        num_anchors = tuplify(num_anchors, len(in_channels_list))
        kernel_size = 5 if (lite and large_kernel) else 3
        self.loc_heads = nn.ModuleList([
            nn.Sequential(
                get_norm_layer("default", c),
                Conv2d(c,
                       n * 4,
                       kernel_size=kernel_size,
                       depthwise_separable=lite,
                       mid_norm_layer='default'))
            for c, n in zip(in_channels_list, num_anchors)
        ])
        self.cls_heads = nn.ModuleList([
            nn.Sequential(
                get_norm_layer("default", c),
                Conv2d(c,
                       n * num_classes,
                       kernel_size=kernel_size,
                       depthwise_separable=lite,
                       mid_norm_layer='default'))
            for c, n in zip(in_channels_list, num_anchors)
        ])

        if focal_init:
            for p in self.cls_heads:
                get_last_conv(p).bias.data.fill_(inverse_sigmoid(0.01))
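Here focal_init follows the RetinaNet prior: the bias of the last classification conv is set so every class starts at probability 0.01. A minimal sketch of the helper, assuming inverse_sigmoid is the plain logit function:

import math

def inverse_sigmoid(p):
    # logit: returns b such that sigmoid(b) == p
    return math.log(p / (1 - p))

# inverse_sigmoid(0.01) ≈ -4.595, so initial class scores sigmoid to 0.01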
Example #2
 def __init__(self, in_channels, out_channels, use_se, drop_path):
     super().__init__()
     self.bn1 = get_norm_layer(in_channels)
     self.nl1 = get_activation("default")
     self.conv1 = Conv2d(in_channels, out_channels, kernel_size=3)
     self.bn2 = get_norm_layer(out_channels)
     self.nl2 = get_activation("default")
     self.conv2 = Conv2d(out_channels, out_channels, kernel_size=3)
     if use_se:
         self.se = SEModule(out_channels, reduction=8)
     if drop_path:
         self.drop_path = DropPath(drop_path)
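DropPath above implements stochastic depth: during training the residual branch is zeroed per sample with probability drop_path and the survivors are rescaled. A minimal sketch of such a module, assuming the standard formulation:

import torch.nn as nn

class DropPath(nn.Module):
    def __init__(self, p):
        super().__init__()
        self.p = p

    def forward(self, x):
        if not self.training or self.p == 0:
            return x
        keep = 1 - self.p
        # one Bernoulli draw per sample, broadcast over C/H/W
        mask = x.new_empty(x.shape[0], 1, 1, 1).bernoulli_(keep)
        return x * mask / keep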
Example #3
 def __init__(self, in_channels, out_channels, stride=1):
     super().__init__()
     self.conv = nn.Sequential(
         get_norm_layer('default', in_channels),
         Conv2d(in_channels,
                out_channels,
                kernel_size=3,
                stride=stride,
                bias=False),
         get_norm_layer('default', out_channels),
         get_activation('default'),
         Conv2d(out_channels, out_channels, kernel_size=3, bias=False),
         get_norm_layer('default', out_channels),
     )
     self.shortcut = Shortcut(in_channels, out_channels, stride)
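The Shortcut module used by the pre-activation blocks here and in Example #15 is not shown; a plausible minimal version (an assumption, not the library's code) is identity when shapes match and a strided 1x1 projection otherwise:

import torch.nn as nn

class Shortcut(nn.Module):
    # hypothetical minimal version: identity when shapes match,
    # otherwise a strided 1x1 projection
    def __init__(self, in_channels, out_channels, stride=1):
        super().__init__()
        if stride != 1 or in_channels != out_channels:
            self.proj = nn.Conv2d(in_channels, out_channels,
                                  kernel_size=1, stride=stride, bias=False)
        else:
            self.proj = nn.Identity()

    def forward(self, x):
        return self.proj(x)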
Example #4
    def __init__(self, in_channels, out_channels, stride, groups, use_se):
        super().__init__()
        self.use_se = use_se

        self.conv1 = Conv2d(in_channels,
                            out_channels,
                            kernel_size=1,
                            norm_layer='default',
                            activation='default')
        self.conv2 = Conv2d(out_channels,
                            out_channels,
                            kernel_size=3,
                            stride=stride,
                            groups=groups,
                            norm_layer='default',
                            activation='default')
        if self.use_se:
            self.se = SE(out_channels, 4)
        self.conv3 = Conv2d(out_channels,
                            out_channels,
                            kernel_size=1,
                            norm_layer='default')
        if stride != 1 or in_channels != out_channels:
            layers = []
            if stride != 1:
                layers.append(nn.AvgPool2d(kernel_size=(2, 2), stride=2))
            layers.extend([
                Conv2d(in_channels, out_channels, kernel_size=1, bias=False),
                get_norm_layer(out_channels),
            ])
            self.shortcut = nn.Sequential(*layers)
        else:
            self.shortcut = nn.Identity()
        self.relu = get_activation('default')
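SE here is squeeze-and-excitation; assuming the second argument is the reduction ratio, a minimal sketch looks like:

import torch.nn as nn

class SE(nn.Module):
    # hypothetical minimal squeeze-and-excitation: global average pool,
    # bottleneck MLP with the given reduction, sigmoid channel gates
    def __init__(self, channels, reduction):
        super().__init__()
        self.pool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Sequential(
            nn.Conv2d(channels, channels // reduction, kernel_size=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(channels // reduction, channels, kernel_size=1),
            nn.Sigmoid(),
        )

    def forward(self, x):
        return x * self.fc(self.pool(x))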
Example #5
    def __init__(self,
                 in_channels,
                 channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 se_ratio=1 / 16):
        super().__init__()

        self.bn = get_norm_layer('default', in_channels)
        if in_channels != channels:
            self.expand = Conv2d(in_channels,
                                 channels,
                                 kernel_size=1,
                                 norm_layer='default',
                                 activation='default')

        self.dwconv = Conv2d(channels,
                             channels,
                             kernel_size,
                             stride=stride,
                             groups=channels,
                             norm_layer='default',
                             activation='default')

        if se_ratio:
            assert 0 < se_ratio < 1
            self.se = SEModule(channels, reduction=int(1 / se_ratio))

        if out_channels is not None:
            self.project = Conv2d(channels,
                                  out_channels,
                                  kernel_size=1,
                                  norm_layer='default')
        self.use_res_connect = stride == 1 and in_channels == out_channels
Example #6
    def __init__(self,
                 primitives=PRIMITIVES,
                 C=16,
                 num_stacked=5,
                 nodes=4,
                 num_classes=10,
                 tau=10.0):
        super().__init__()
        self.primitives = primitives
        self.C = C
        self.num_classes = num_classes
        self.num_stacked = num_stacked
        self.nodes = nodes
        self.tau = tau

        self.stem = Conv2d(3, C, kernel_size=3, norm_layer='default')
        for i in range(3):
            if i != 0:
                self.add_module("reduce%d" % i, ReductionCell(C, C * 2))
                C = C * 2
            stage = nn.ModuleList()
            for _ in range(num_stacked):
                stage.append(NormalCell(primitives, nodes, C))
            self.add_module("stage%d" % (i + 1), stage)

        self.post_activ = nn.Sequential(
            get_norm_layer(C),
            get_activation(),
        )
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.classifier = nn.Linear(C, num_classes)

        self._initialize_alphas()
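tau above is the temperature for a Gumbel-softmax relaxation over the primitives; _initialize_alphas presumably creates the architecture logits. A sketch of how they might be sampled (the alphas name and shape are assumptions):

import torch
import torch.nn.functional as F

# hypothetical alphas: one logit per primitive, per edge
# (14 edges for 4 nodes, 8 primitives are illustrative numbers)
alphas = torch.zeros(14, 8, requires_grad=True)
weights = F.gumbel_softmax(alphas, tau=10.0, hard=False, dim=-1)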
Example #7
 def __init__(self, C_in, C_out):
     super().__init__()
     assert C_out % 2 == 0
     self.relu = nn.ReLU(inplace=False)
     self.conv_1 = Conv2d(C_in, C_out // 2, 1, stride=2, bias=False)
     self.conv_2 = Conv2d(C_in, C_out // 2, 1, stride=2, bias=False)
     self.bn = get_norm_layer(C_out)
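This is a DARTS-style FactorizedReduce; the usual forward (an assumption here) offsets the second path by one pixel so the two stride-2 convs cover complementary positions:

 def forward(self, x):
     x = self.relu(x)
     # the second path sees the input shifted by one pixel so the two
     # stride-2 convs together cover all spatial positions
     out = torch.cat([self.conv_1(x), self.conv_2(x[:, :, 1:, 1:])], dim=1)
     return self.bn(out)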
Example #8
    def __init__(self, depth, k, num_classes=10, use_se=False, drop_path=0):
        super().__init__()
        num_blocks = (depth - 4) // 6
        # NOTE: self.stages is assumed to be a class-level attribute
        # (e.g. stages = [16, 16, 32, 64] for a WRN-style net)
        self.conv = Conv2d(3, self.stages[0], kernel_size=3)

        self.layer1 = self._make_layer(self.stages[0] * 1,
                                       self.stages[1] * k,
                                       num_blocks,
                                       stride=1,
                                       use_se=use_se,
                                       drop_path=drop_path)
        self.layer2 = self._make_layer(self.stages[1] * k,
                                       self.stages[2] * k,
                                       num_blocks,
                                       stride=2,
                                       use_se=use_se,
                                       drop_path=drop_path)
        self.layer3 = self._make_layer(self.stages[2] * k,
                                       self.stages[3] * k,
                                       num_blocks,
                                       stride=2,
                                       use_se=use_se,
                                       drop_path=drop_path)

        self.bn = get_norm_layer(self.stages[3] * k)
        self.nl = get_activation('default')
        self.avgpool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Linear(self.stages[3] * k, num_classes)
Example #9
 def __init__(self, C_in, C_out, kernel_size):
     super().__init__()
     self.op = nn.Sequential(
         get_activation(),
         Conv2d(C_in, C_out, kernel_size, bias=False),
         get_norm_layer(C_out),
     )
Example #10
    def __init__(self, start_channels, num_classes, block, widening_fractor,
                 depth):
        super().__init__()

        if block == 'basic':
            block = BasicBlock
            num_layers = [(depth - 2) // 6] * 3
        elif block == 'bottleneck':
            block = Bottleneck
            num_layers = [(depth - 2) // 9] * 3
        else:
            raise ValueError("invalid block type: %s" % block)

        strides = [1, 2, 2]

        self.add_channel = widening_fractor / sum(num_layers)
        self.in_channels = start_channels
        self.channels = start_channels

        layers = [
            Conv2d(3, start_channels, kernel_size=3, norm_layer='default')
        ]

        for n, s in zip(num_layers, strides):
            layers.append(self._make_layer(block, n, stride=s))

        self.features = nn.Sequential(*layers)
        assert (start_channels +
                widening_fractor) * block.expansion == self.in_channels
        self.post_activ = nn.Sequential(
            get_norm_layer(self.in_channels),
            get_activation('default'),
        )
        self.final_pool = nn.AdaptiveAvgPool2d(1)
        self.output = nn.Linear(self.in_channels, num_classes)
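_make_layer is not shown here (or in Example #26); in PyramidNet the channel count grows by add_channel with every block and is rounded when a block is built. A sketch under that assumption:

    def _make_layer(self, block, num_layers, stride):
        layers = []
        for i in range(num_layers):
            # accumulate the fractional widening, round per block
            self.channels += self.add_channel
            layers.append(block(self.in_channels, round(self.channels),
                                stride=stride if i == 0 else 1))
            self.in_channels = round(self.channels) * block.expansion
        return nn.Sequential(*layers)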
Example #11
 def __init__(self, primitives, C):
     super().__init__()
     self._ops = nn.ModuleList()
     for primitive in primitives:
         op = OPS[primitive](C, 1)
         if 'pool' in primitive:
             op = nn.Sequential(op, get_norm_layer(C))
         self._ops.append(op)
Example #12
 def __init__(self, C, stride):
     super().__init__()
     self._ops = nn.ModuleList()
     for primitive in PRIMITIVES:
         op = OPS[primitive](C, stride)
         if 'pool' in primitive:
             op = nn.Sequential(op, get_norm_layer(C))
         self._ops.append(op)
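Both of these mixed ops hold one instance of every primitive; the usual DARTS forward (assumed here) is a weighted sum with externally supplied architecture weights:

 def forward(self, x, weights):
     # weights: one scalar per primitive, e.g. softmax over the alphas
     return sum(w * op(x) for w, op in zip(weights, self._ops))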
Example #13
 def __init__(self, in_channels, out_channels, kernel_size, stride=1):
     super(NasConv, self).__init__()
     self.activ = get_activation('default')
     self.conv = Conv2d(in_channels,
                        out_channels,
                        kernel_size,
                        stride,
                        bias=False)
     self.bn = get_norm_layer('default', out_channels)
Example #14
    def __init__(self, in_channels, out_channels, stride=1, use_se=False):
        super().__init__()
        self.use_se = use_se
        self.bn1 = get_norm_layer(in_channels)
        self.nl1 = get_activation("default")
        self.conv1 = Conv2d(in_channels,
                            out_channels,
                            kernel_size=3,
                            stride=stride)
        self.bn2 = get_norm_layer(out_channels)
        self.nl2 = get_activation("default")
        self.conv2 = Conv2d(out_channels, out_channels, kernel_size=3)
        if self.use_se:
            self.se = SEModule(out_channels, reduction=8)

        self.shortcut = Conv2d(in_channels,
                               out_channels,
                               kernel_size=1,
                               stride=stride)
Example #15
 def __init__(self, in_channels, channels, stride=1):
     super().__init__()
     out_channels = channels * self.expansion
     self.conv = nn.Sequential(
         get_norm_layer(in_channels),
         Conv2d(in_channels, channels, kernel_size=1, bias=False),
         get_norm_layer(channels),
         get_activation(),
         Conv2d(channels,
                channels,
                kernel_size=3,
                stride=stride,
                bias=False),
         get_norm_layer(channels),
         get_activation(),
         Conv2d(channels, out_channels, kernel_size=1, bias=False),
         get_norm_layer(out_channels),
     )
     self.shortcut = Shortcut(in_channels, out_channels, stride)
Example #16
 def __init__(self, in_channels, out_channels, kernel_size, stride):
     super().__init__()
     self.activ = get_activation("default")
     self.conv = Conv2d(in_channels=in_channels,
                        out_channels=out_channels,
                        kernel_size=kernel_size,
                        stride=stride,
                        bias=False,
                        depthwise_separable=True)
     self.bn = get_norm_layer('bn', out_channels)
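depthwise_separable=True in the Conv2d wrapper presumably factorizes the conv into a depthwise conv plus a 1x1 pointwise conv (with mid_norm_layer, where given, inserted between the two). In plain PyTorch the factorization is:

import torch.nn as nn

def depthwise_separable(in_channels, out_channels, kernel_size, stride):
    # depthwise: one filter per input channel; pointwise: 1x1 channel mixing
    return nn.Sequential(
        nn.Conv2d(in_channels, in_channels, kernel_size, stride=stride,
                  padding=kernel_size // 2, groups=in_channels, bias=False),
        nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False),
    )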
Example #17
    def __init__(self, in_channels, out_channels):
        super().__init__()
        mid_channels = out_channels // 2

        self.activ = get_activation('default')
        self.path1 = NasPathBranch(in_channels=in_channels,
                                   out_channels=mid_channels)
        self.path2 = NasPathBranch(in_channels=in_channels,
                                   out_channels=mid_channels,
                                   extra_padding=True)
        self.bn = get_norm_layer('default', out_channels)
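This mirrors NASNet's factorized path block: two half-width branches are concatenated, with extra_padding shifting the second branch by one pixel. The usual forward (assumed):

    def forward(self, x):
        x = self.activ(x)
        x = torch.cat([self.path1(x), self.path2(x)], dim=1)
        return self.bn(x)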
Example #18
    def __init__(self,
                 stem_channels=64,
                 mid_channels=(64, 80, 96, 112),
                 out_channels=(128, 256, 384, 512),
                 num_modules=(1, 1, 1, 1),
                 num_classes=1000):
        super().__init__()
        num_stages = 5
        assert len(mid_channels) == len(out_channels) == len(
            num_modules) == num_stages - 1

        self.features = nn.Sequential()
        self.features.add_module(
            "init_block",
            nn.Sequential(
                Conv2d(3,
                       stem_channels,
                       kernel_size=3,
                       stride=2,
                       norm_layer='default',
                       activation='default'),
                Conv2d(stem_channels,
                       stem_channels,
                       kernel_size=3,
                       norm_layer='default',
                       activation='default'),
                Conv2d(stem_channels,
                       stem_channels * 2,
                       kernel_size=3,
                       norm_layer='default',
                       activation='default'),
            ))
        in_channels = stem_channels * 2
        for i, m, o, n in zip(range(num_stages - 1), mid_channels,
                              out_channels, num_modules):
            stage = nn.Sequential()
            stage.add_module(
                "pool", nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True))
            for j in range(n):
                stage.add_module("unit%d" % (j + 1), OSA(in_channels, m, o))
                in_channels = o
            self.features.add_module("stage%d" % (i + 1), stage)

        self.features.add_module(
            "post_activ",
            nn.Sequential(
                get_norm_layer("default", in_channels),
                get_activation("default"),
            ))
        self.features.add_module("final_pool", nn.AdaptiveAvgPool2d(1))

        self.output = nn.Linear(in_features=in_channels,
                                out_features=num_classes)
Example #19
    def __init__(self,
                 C,
                 layers,
                 steps=4,
                 multiplier=4,
                 stem_multiplier=3,
                 num_classes=10,
                 tau=10.0):
        super().__init__()
        self.C = C
        self.num_classes = num_classes
        self.layers = layers
        self.steps = steps
        self.multiplier = multiplier
        self.tau = tau

        C_curr = stem_multiplier * C
        self.stem = nn.Sequential(
            Conv2d(3, C_curr, kernel_size=3, bias=False),
            get_norm_layer(C_curr),
        )

        C_prev_prev, C_prev, C_curr = C_curr, C_curr, C
        self.cells = nn.ModuleList()
        reduction_prev = False
        for i in range(layers):
            if i in [layers // 3, 2 * layers // 3]:
                C_curr *= 2
                reduction = True
            else:
                reduction = False
            if reduction:
                cell = ReductionCell(C_prev_prev, C_prev, C_curr)
            else:
                cell = NormalCell(steps, multiplier, C_prev_prev, C_prev,
                                  C_curr, reduction_prev)
            reduction_prev = reduction
            self.cells.append(cell)
            C_prev_prev, C_prev = C_prev, multiplier * C_curr

        self.post_activ = nn.Sequential(
            get_norm_layer(C_prev),
            get_activation(),
        )
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.classifier = nn.Linear(C_prev, num_classes)

        self._initialize_alphas()
Example #20
    def __init__(self, C_prev_prev, C_prev, C):
        super().__init__()
        self.preprocess0 = ReLUConvBN(C_prev_prev, C, 1)
        self.preprocess1 = ReLUConvBN(C_prev, C, 1)

        self.branch_a1 = nn.Sequential(
            get_activation(),
            Conv2d(C, C, (1, 3), stride=(1, 2), groups=8, bias=False),
            Conv2d(C, C, (3, 1), stride=(2, 1), groups=8, bias=False),
            get_norm_layer(C, affine=True),
            get_activation(),
            Conv2d(C, C, 1),
            get_norm_layer(C, affine=True),
        )
        self.branch_a2 = nn.Sequential(
            nn.MaxPool2d(3, stride=2, padding=1),
            get_norm_layer(C, affine=True)
        )
        self.branch_b1 = nn.Sequential(
            get_activation(),
            Conv2d(C, C, (1, 3), stride=(1, 2), groups=8, bias=False),
            Conv2d(C, C, (3, 1), stride=(2, 1), groups=8, bias=False),
            get_norm_layer(C, affine=True),
            get_activation(),
            Conv2d(C, C, 1),
            get_norm_layer(C, affine=True),
        )
        self.branch_b2 = nn.Sequential(
            nn.MaxPool2d(3, stride=2, padding=1),
            get_norm_layer(C, affine=True)
        )
Example #21
    def __init__(self,
                 num_anchors,
                 num_classes,
                 in_channels_list,
                 focal_init=False):
        super().__init__()
        self.num_classes = num_classes
        num_anchors = tuplify(num_anchors, len(in_channels_list))
        self.loc_heads = nn.ModuleList([
            nn.Sequential(get_norm_layer("default", c),
                          Conv2d(c, n * 4, kernel_size=1))
            for c, n in zip(in_channels_list, num_anchors)
        ])
        self.cls_heads = nn.ModuleList([
            nn.Sequential(get_norm_layer("default", c),
                          Conv2d(c, n * num_classes, kernel_size=1))
            for c, n in zip(in_channels_list, num_anchors)
        ])

        if focal_init:
            for p in self.cls_heads:
                get_last_conv(p).bias.data.fill_(inverse_sigmoid(0.01))
Example #22
 def __init__(self, C_in, C_out, kernel_size, stride, padding):
     super().__init__()
     self.op = nn.Sequential(
         get_activation(),
         Conv2d(C_in,
                C_in,
                kernel_size=kernel_size,
                stride=stride,
                groups=C_in,
                bias=False),
         Conv2d(C_in, C_in, kernel_size=1, bias=False),
         get_norm_layer(C_in),
         get_activation(),
         nn.Conv2d(C_in,
                   C_in,
                   kernel_size=kernel_size,
                   stride=1,
                   padding=padding,
                   groups=C_in,
                   bias=False),
         nn.Conv2d(C_in, C_out, kernel_size=1, padding=0, bias=False),
         get_norm_layer(C_out),
     )
Example #23
 def __init__(self, C_in, C_out, kernel_size, stride, dilation):
     super().__init__()
     self.op = nn.Sequential(
         get_activation(),
         Conv2d(C_in,
                C_in,
                kernel_size=kernel_size,
                stride=stride,
                dilation=dilation,
                groups=C_in,
                bias=False),
         Conv2d(C_in, C_out, kernel_size=1, bias=False),
         get_norm_layer(C_out),
     )
Example #24
 def _make_head(self, num_layers, f_channels, out_channels):
     layers = []
     for i in range(num_layers):
         layers.append(
             MBConv(f_channels,
                    f_channels * self.expand_ratio,
                    f_channels,
                    kernel_size=5))
     layers.append(
         nn.Sequential(
             get_norm_layer('default', f_channels),
             Conv2d(f_channels, out_channels, kernel_size=1),
         ))
     return nn.Sequential(*layers)
Example #25
    def __init__(self,
                 num_anchors,
                 num_classes,
                 in_channels,
                 lite=False,
                 concat=True):
        super().__init__()
        self.num_classes = num_classes
        self.concat = concat
        num_anchors = _tuple(num_anchors, len(in_channels))
        self.preds = nn.ModuleList([
            nn.Sequential(
                get_norm_layer('default', c),
                Conv2d(c,
                       n * (num_classes + 4),
                       kernel_size=3,
                       depthwise_separable=lite,
                       mid_norm_layer='default')
            )
            for c, n in zip(in_channels, num_anchors)
        ])

        for p in self.preds:
            get_last_conv(p).bias.data[4:].fill_(inverse_sigmoid(0.01))
Example #26
 def __init__(self, start_channels, num_classes, block, widening_fractor,
              num_layers):
     super().__init__()
     self.add_channel = widening_fractor / sum(num_layers)
     self.in_channels = start_channels
     self.channels = start_channels
     self.features = nn.Sequential(
         Conv2d(3, start_channels, kernel_size=3, norm_layer='default'),
         self._make_layer(block, num_layers[0], stride=1),
         self._make_layer(block, num_layers[1], stride=2),
         self._make_layer(block, num_layers[2], stride=2),
     )
     assert (start_channels +
             widening_fractor) * block.expansion == self.in_channels
     self.post_activ = nn.Sequential(
         get_norm_layer('default', self.in_channels),
         get_activation('default'),
     )
     self.final_pool = nn.AdaptiveAvgPool2d(1)
     self.output = nn.Linear(self.in_channels, num_classes)
Example #27
    def __init__(self,
                 stem_channels=64,
                 mid_channels=192,
                 growth_rate=48,
                 num_units=(6, 8, 8, 8)):
        super().__init__()

        self.features = nn.Sequential()
        self.features.add_module("init_block", StemBlock(stem_channels))
        in_channels = stem_channels * 2
        for i, n in enumerate(num_units):
            stage = nn.Sequential()
            if i != len(num_units) - 1:
                stage.add_module("trans", Transition(in_channels, in_channels))
            for j in range(n):
                stage.add_module("unit%d" % (j + 1),
                                 DenseUnit(in_channels, mid_channels, growth_rate))
                in_channels += growth_rate
            self.features.add_module("stage%d" % (i + 1), stage)
        self.features.add_module("post_activ", seq(
            ("bn", get_norm_layer("default", in_channels)),
            ("relu", get_activation("default")),
        ))
Example #28
    def __init__(self,
                 stem_channels=64,
                 mid_channels=192,
                 growth_rate=48,
                 num_units=(6, 8, 8, 8)):
        super().__init__()

        self.init_block = StemBlock(stem_channels)
        in_channels = stem_channels * 2
        out_channels = [in_channels]
        for i, n in enumerate(num_units):
            stage = nn.Sequential()
            stage.add_module(
                "pool", nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True))
            for j in range(n):
                stage.add_module("unit%d" % (j + 1),
                                 DenseUnit(in_channels, mid_channels, growth_rate))
                in_channels += growth_rate
            if i != len(num_units) - 1:
                stage.add_module(
                    "trans", Transition(in_channels, in_channels))
            out_channels.append(in_channels)
            self.add_module("stage%d" % (i + 1), stage)
        self.post_activ = seq(
            ("bn", get_norm_layer("default", in_channels)),
            ("relu", get_activation("default")),
        )

        del self.stage4.pool

        self.trans = Transition(out_channels[-1], out_channels[-1])
        self.proj = Transition(out_channels[-1], 512)

        self.extra1 = BasicBlock(512, 512)
        self.extra2 = BasicBlock(512, 256)
        self.extra3 = BasicBlock(256, 256)
        self.extra4 = BasicBlock(256, 256)
        self.out_channels = [out_channels[-3], 512, 512, 256, 256, 256]
Example #29
OPS = {
    'sep_conv_3x3':
    lambda C, stride: SepConv(C, C, 3, stride, 1),
    'sep_conv_5x5':
    lambda C, stride: SepConv(C, C, 5, stride, 2),
    'sep_conv_7x7':
    lambda C, stride: SepConv(C, C, 7, stride, 3),
    'dil_conv_3x3':
    lambda C, stride: DilConv(C, C, 3, stride, 2),
    'dil_conv_5x5':
    lambda C, stride: DilConv(C, C, 5, stride, 4),
    'conv_7x1_1x7':
    lambda C, stride: nn.Sequential(
        get_activation(),
        Conv2d(C, C, (1, 7), stride=(1, stride), bias=False),
        Conv2d(C, C, (7, 1), stride=(stride, 1), bias=False),
        get_norm_layer(C),
    ),
}


class ReLUConvBN(nn.Module):
    def __init__(self, C_in, C_out, kernel_size):
        super().__init__()
        self.op = nn.Sequential(
            get_activation(),
            Conv2d(C_in, C_out, kernel_size, bias=False),
            get_norm_layer(C_out),
        )

    def forward(self, x):
        return self.op(x)