Example #1
import torch.nn as nn
import torchelie.nn as tnn
import torchelie.utils as tu


class MBConv(nn.Module):
    def __init__(self, in_ch, out_ch, ks, stride=1, mul_factor=6):
        super(MBConv, self).__init__()
        self.in_ch = in_ch
        self.out_ch = out_ch
        self.ks = ks
        self.stride = stride
        self.factor = mul_factor

        hid = in_ch * mul_factor
        # Inverted bottleneck: 1x1 expansion, depthwise conv,
        # squeeze-and-excitation, then 1x1 projection down to out_ch.
        self.branch = tnn.CondSeq(
            tu.xavier(tnn.Conv1x1(in_ch, hid, bias=False)),
            nn.BatchNorm2d(hid),
            tnn.HardSwish(),
            tu.xavier(
                nn.Conv2d(hid,
                          hid,
                          ks,
                          stride=stride,
                          padding=ks // 2,
                          groups=hid,  # depthwise: one filter per channel
                          bias=False)),
            nn.BatchNorm2d(hid),
            tnn.HardSwish(),
            tnn.SEBlock(hid, reduction=4),
            tu.xavier(tnn.Conv1x1(hid, out_ch)),
            nn.BatchNorm2d(out_ch))

        # Identity shortcut; downsample and/or project only when the
        # output shape differs from the input.
        self.shortcut = tnn.CondSeq()

        if stride != 1:
            self.shortcut.add_module(
                'pool', nn.AvgPool2d(stride, stride, ceil_mode=True))

        if in_ch != out_ch:
            self.shortcut.add_module('conv',
                                     tnn.Conv1x1(in_ch, out_ch, bias=False))
            self.shortcut.add_module('bn', nn.BatchNorm2d(out_ch))
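
A minimal usage sketch (hypothetical: the excerpt omits the forward, which in an inverted-residual block of this kind typically sums the two paths):

    import torch

    # Hypothetical forward, following the usual MBConv pattern:
    #     def forward(self, x):
    #         return self.branch(x) + self.shortcut(x)
    block = MBConv(32, 64, ks=3, stride=2)
    y = block(torch.randn(1, 32, 56, 56))  # -> torch.Size([1, 64, 28, 28])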
Example #2
class ResBlk(nn.Module):
    def __init__(self, in_ch, hid_ch, out_ch, ks, sz):
        super(ResBlk, self).__init__()
        # Pre-activation bottleneck: BN-ReLU precedes each conv. The first
        # ReLU is not inplace so the block's input is left intact for a
        # residual connection.
        self.go = tnn.CondSeq(
            nn.BatchNorm2d(in_ch),
            nn.ReLU(inplace=False),
            tnn.Conv1x1(in_ch, hid_ch),
            nn.BatchNorm2d(hid_ch),
            nn.ReLU(inplace=True),
            tnn.TopLeftConv2d(hid_ch, hid_ch, ks, center=True, bias=sz),
            nn.BatchNorm2d(hid_ch),
            nn.ReLU(inplace=True),
            tnn.Conv1x1(hid_ch, out_ch),
        )
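
The excerpt builds only the main path; a hypothetical forward for the common case in_ch == out_ch would add the input back:

    # Hypothetical forward, assuming in_ch == out_ch:
    #     def forward(self, x):
    #         return x + self.go(x)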
Example #3
class AttentionBlock(nn.Module):
    def __init__(self, ch, n_down):
        super(AttentionBlock, self).__init__()
        self.pre = Block(ch, ch)
        self.post = Block(ch, ch)
        self.trunk = tnn.CondSeq(Block(ch, ch), Block(ch, ch))

        # Soft mask branch: n_down nested U-blocks (n_down must be >= 1,
        # otherwise soft stays None), then two 1x1 convs and a HardSigmoid
        # squashing the mask into [0, 1].
        soft = None
        for _ in range(n_down):
            soft = UBlock(ch, soft)
        self.mask = tnn.CondSeq(soft,
                                nn.BatchNorm2d(ch),
                                nn.ReLU(True),
                                tu.kaiming(tnn.Conv1x1(ch, ch, bias=False)),
                                nn.BatchNorm2d(ch),
                                nn.ReLU(True),
                                tu.kaiming(tnn.Conv1x1(ch, ch)),
                                tnn.HardSigmoid())
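
HardSigmoid keeps the mask in [0, 1], so a plausible forward (not shown in the excerpt) follows the Residual Attention Network recipe of rescaling the trunk features:

    # Hypothetical forward in the Residual Attention Network style:
    #     def forward(self, x):
    #         x = self.pre(x)
    #         return self.post((1 + self.mask(x)) * self.trunk(x))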
Example #4
class AttentionBlock(nn.Module):
    def __init__(self,
                 ch: int,
                 n_down: int,
                 n_trunk: int = 2,
                 n_post: int = 1,
                 n_pre: int = 1,
                 n_att_conv: int = 2,
                 with_skips: bool = True) -> None:
        super(AttentionBlock, self).__init__()
        self.pre = tnn.CondSeq(*[Block(ch, ch) for _ in range(n_pre)])
        self.post = tnn.CondSeq(*[Block(ch, ch) for _ in range(n_post)])
        self.trunk = tnn.CondSeq(*[Block(ch, ch) for _ in range(n_trunk)])

        soft: nn.Module = UBlock1(ch)
        for _ in range(n_down - 1):
            soft = UBlock(ch, soft, with_skip=with_skips)

        # A negative n_down disables the attention mask entirely.
        if n_down >= 0:
            conv1 = [soft]
            for i in range(n_att_conv):
                conv1 += [
                    nn.BatchNorm2d(ch),
                    nn.ReLU(True),
                    tu.kaiming(tnn.Conv1x1(ch, ch, bias=(i != n_att_conv - 1)))
                ]
            conv1.append(nn.Sigmoid())

            self.mask = tnn.CondSeq(*conv1)
        else:
            self.mask = None
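
A usage sketch with hypothetical sizes; passing a negative n_down skips the attention branch entirely:

    att = AttentionBlock(ch=64, n_down=2, n_trunk=2, n_att_conv=2)
    plain = AttentionBlock(ch=64, n_down=-1)  # self.mask is None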
Example #5
class EfficientNet(nn.Sequential):  # assumed base class; the excerpt omits the header
    def __init__(self, in_ch, num_classes, B=0):

        # Compound scaling, as in the EfficientNet paper: width x1.1^B
        # (rounded down to a multiple of 8), depth x1.2^B, resolution x1.15^B.
        def ch(ch):
            return int(ch * 1.1**B) // 8 * 8

        def n_layers(d):
            return int(math.ceil(d * 1.2**B))

        def r():
            return int(224 * 1.15**B)

        super(EfficientNet, self).__init__(
            # Stage 1
            # nn.UpsamplingBilinear2d(size=(r(), r())),
            tu.kaiming(tnn.Conv3x3(in_ch, ch(32), stride=2, bias=False)),
            nn.BatchNorm2d(ch(32)),
            tnn.HardSwish(),

            # Stage 2
            MBConv(ch(32), ch(16), 3, mul_factor=1),
            *[
                MBConv(ch(16), ch(16), 3, mul_factor=1)
                for _ in range(n_layers(1) - 1)
            ],

            # Stage 3
            MBConv(ch(16), ch(24), 3, stride=2),
            *[MBConv(ch(24), ch(24), 3) for _ in range(n_layers(2) - 1)],

            # Stage 4
            MBConv(ch(24), ch(40), 5, stride=2),
            *[MBConv(ch(40), ch(40), 5) for _ in range(n_layers(2) - 1)],

            # Stage 5
            MBConv(ch(40), ch(80), 3, stride=2),
            *[MBConv(ch(80), ch(80), 3) for _ in range(n_layers(3) - 1)],

            # Stage 6
            MBConv(ch(80), ch(112), 5),
            *[MBConv(ch(112), ch(112), 5) for _ in range(n_layers(3) - 1)],

            # Stage 7
            MBConv(ch(112), ch(192), 5, stride=2),
            *[MBConv(ch(192), ch(192), 5) for _ in range(n_layers(4) - 1)],

            # Stage 8
            MBConv(ch(192), ch(320), 3),
            *[MBConv(ch(320), ch(320), 3) for _ in range(n_layers(1) - 1)],
            tu.kaiming(tnn.Conv1x1(ch(320), ch(1280), bias=False)),
            nn.BatchNorm2d(ch(1280)),
            tnn.HardSwish(),
            nn.AdaptiveAvgPool2d(1),
            tnn.Reshape(-1),
            tu.xavier(nn.Linear(ch(1280), num_classes)))
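
With B=0 all three scaling helpers are identities, so the layout matches EfficientNet-B0. A usage sketch (the bilinear resize that would implement resolution scaling is commented out above, so the caller picks the input size):

    import torch

    net = EfficientNet(3, 1000, B=0)
    logits = net(torch.randn(1, 3, 224, 224))  # -> torch.Size([1, 1000])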
Example #6
def VggGeneratorDebug(in_noise=32, out_ch=3):
    """
    A not-so-small VGG-style image GAN generator, for testing purposes.

    Args:
        in_noise (int): dimension of the input noise
        out_ch (int): number of output channels (3 for RGB images)

    Returns:
        an nn.Sequential VGG-style generator
    """
    return nn.Sequential(
        kaiming(nn.Linear(in_noise, 128 * 16)),
        tnn.Reshape(128, 4, 4),
        nn.LeakyReLU(0.2, inplace=True),
        VggBNBone([128, 'U', 64, 'U', 32, 'U', 16], in_ch=128),
        xavier(tnn.Conv1x1(16, out_ch)),
        nn.Sigmoid())
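
A usage sketch: the three 'U' (upsampling) stages in VggBNBone grow the 4x4 seed to 32x32:

    import torch

    G = VggGeneratorDebug(in_noise=32, out_ch=3)
    imgs = G(torch.randn(8, 32))  # -> torch.Size([8, 3, 32, 32])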
Example #7
class VggImg2ImgGeneratorDebug(nn.Module):
    def __init__(self, in_noise, out_ch, side_ch=1):
        super(VggImg2ImgGeneratorDebug, self).__init__()

        # Each conv block normalizes with SPADE, spatially modulated by a
        # side image with side_ch channels.
        def make_block(in_ch, out_ch, **kwargs):
            return tnn.Conv2dNormReLU(
                in_ch,
                out_ch,
                norm=lambda out: tnn.Spade2d(out, side_ch, 64),
                **kwargs)

        self.net = tnn.CondSeq(
            kaiming(nn.Linear(in_noise, 128 * 16)),
            tnn.Reshape(128, 4, 4),
            nn.LeakyReLU(0.2, inplace=True),
            VggBNBone([128, 'U', 64, 'U', 32, 'U', 16],
                      in_ch=128,
                      block=make_block),
            xavier(tnn.Conv1x1(16, out_ch)),
            nn.Sigmoid())
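
The forward is not shown; assuming CondSeq's usual protocol of dispatching a condition to its conditional children, it would plausibly read:

    # Hypothetical forward: the side image drives the Spade2d layers.
    #     def forward(self, z, side_img):
    #         self.net.condition(side_img)
    #         return self.net(z)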
Example #8
class VggClassCondGeneratorDebug(nn.Module):
    def __init__(self, in_noise, out_ch, num_classes):
        super(VggClassCondGeneratorDebug, self).__init__()

        # Each conv block uses class-conditional BatchNorm driven by a
        # 64-d class embedding.
        def make_block(in_ch, out_ch, **kwargs):
            return tnn.Conv2dNormReLU(
                in_ch,
                out_ch,
                norm=lambda out: tnn.ConditionalBN2d(out, 64),
                **kwargs)

        self.emb = nn.Embedding(num_classes, 64)
        self.net = tnn.CondSeq(
            kaiming(nn.Linear(in_noise, 128 * 16)),
            tnn.Reshape(128, 4, 4),
            nn.LeakyReLU(0.2, inplace=True),
            VggBNBone([128, 'U', 64, 'U', 32, 'U', 16],
                      in_ch=128,
                      block=make_block),
            xavier(tnn.Conv1x1(16, out_ch)),
            nn.Sigmoid())
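
Same caveat as above: assuming CondSeq.condition() reaches the ConditionalBN2d layers, the class label would be embedded and passed as the condition:

    # Hypothetical forward:
    #     def forward(self, z, y):
    #         self.net.condition(self.emb(y))
    #         return self.net(z)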
Example #9
def _do(m):
    # Swap every nn.Linear for a 1x1 convolution with matching channel
    # counts. Note that the fresh conv does not inherit the Linear's weights.
    if isinstance(m, nn.Linear):
        return tnn.Conv1x1(m.in_features, m.out_features)
    return m
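
A callback like this is typically applied by walking the module tree. A minimal sketch with a hypothetical helper (plain PyTorch; note the swapped-in convs start untrained):

    def patch(module):
        # Replace eligible children in place; _do returns the child
        # unchanged unless it is an nn.Linear.
        for name, child in list(module.named_children()):
            new = _do(child)
            if new is not child:
                setattr(module, name, new)
            else:
                patch(child)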