Example #1
    def __init__(self, in_ch, out_ch, ks, stride=1, mul_factor=6):
        super(MBConv, self).__init__()
        self.in_ch = in_ch
        self.out_ch = out_ch
        self.ks = ks
        self.stride = stride
        self.factor = mul_factor

        hid = in_ch * mul_factor
        self.branch = tnn.CondSeq(
            tu.xavier(tnn.Conv1x1(in_ch, hid, bias=False)),
            nn.BatchNorm2d(hid), tnn.HardSwish(),
            tu.xavier(
                nn.Conv2d(hid,
                          hid,
                          ks,
                          stride=stride,
                          padding=ks // 2,
                          groups=hid,
                          bias=False)), nn.BatchNorm2d(hid), tnn.HardSwish(),
            tnn.SEBlock(hid, reduction=4), tu.xavier(tnn.Conv1x1(hid, out_ch)),
            nn.BatchNorm2d(out_ch))

        self.shortcut = tnn.CondSeq()

        if stride != 1:
            self.shortcut.add_module(
                'pool', nn.AvgPool2d(stride, stride, ceil_mode=True))

        if in_ch != out_ch:
            self.shortcut.add_module('conv',
                                     tnn.Conv1x1(in_ch, out_ch, bias=False))
            self.shortcut.add_module('bn', nn.BatchNorm2d(out_ch))
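Only the constructor is shown here; the matching forward pass is presumably the usual residual sum of the two branches. A minimal sketch (hypothetical, not part of the excerpt):

    def forward(self, x):
        # Residual sum of the inverted-bottleneck branch and the
        # (possibly pooled/projected) shortcut.
        return self.branch(x) + self.shortcut(x)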
Example #2
    def __init__(self,
                 ch: int,
                 n_down: int,
                 n_trunk: int = 2,
                 n_post: int = 1,
                 n_pre: int = 1,
                 n_att_conv: int = 2,
                 with_skips: bool = True) -> None:
        super(AttentionBlock, self).__init__()
        self.pre = tnn.CondSeq(*[Block(ch, ch) for _ in range(n_pre)])
        self.post = tnn.CondSeq(*[Block(ch, ch) for _ in range(n_post)])
        self.trunk = tnn.CondSeq(*[Block(ch, ch) for _ in range(n_trunk)])

        soft: nn.Module = UBlock1(ch)
        for _ in range(n_down - 1):
            soft = UBlock(ch, soft, with_skip=with_skips)

        if n_down >= 0:
            conv1 = [soft]
            for i in range(n_att_conv):
                conv1 += [
                    nn.BatchNorm2d(ch),
                    nn.ReLU(True),
                    tu.kaiming(tnn.Conv1x1(ch, ch, bias=(i != n_att_conv - 1)))
                ]
            conv1.append(nn.Sigmoid())

            self.mask = tnn.CondSeq(*conv1)
        else:
            self.mask = None
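The excerpt stops before forward. In the canonical Residual Attention Network wiring, the sigmoid mask gates the trunk as trunk * (1 + mask); a sketch under that assumption:

    def forward(self, x):
        # Hypothetical forward: the soft mask modulates the trunk
        # residually, and degrades to a plain trunk when mask is disabled.
        x = self.pre(x)
        t = self.trunk(x)
        if self.mask is not None:
            t = t * (1 + self.mask(x))
        return self.post(t)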
Example #3
    def __init__(self, ch, inner):
        super(UBlock, self).__init__()
        self.inner = inner
        if inner is not None:
            self.skip = Block(ch, ch)
        else:
            # Keep the attribute defined either way, as in the typed
            # variant of this block in Example #6.
            self.skip = None
        self.encode = tnn.CondSeq(nn.MaxPool2d(3, 2, 1), Block(ch, ch))
        self.decode = tnn.CondSeq(Block(ch, ch),
                                  nn.UpsamplingBilinear2d(scale_factor=2))
Example #4
    def __init__(self, ch, n_down):
        super(AttentionBlock, self).__init__()
        self.pre = Block(ch, ch)
        self.post = Block(ch, ch)
        self.trunk = tnn.CondSeq(Block(ch, ch), Block(ch, ch))

        soft = None
        for _ in range(n_down):
            soft = UBlock(ch, soft)
        self.mask = tnn.CondSeq(soft, nn.BatchNorm2d(ch), nn.ReLU(True),
                                tu.kaiming(tnn.Conv1x1(ch, ch, bias=False)),
                                nn.BatchNorm2d(ch), nn.ReLU(True),
                                tu.kaiming(tnn.Conv1x1(ch, ch)),
                                tnn.HardSigmoid())
Example #5
    def __init__(self, in_ch=3):
        super(Attention56Bone, self).__init__()
        self.head = tnn.CondSeq(
            tu.kaiming(tnn.Conv2d(in_ch, 64, 7, stride=2)),
            nn.ReLU(True), nn.MaxPool2d(3, 2, 1))
        self.pre1 = Block(64, 256)
        self.attn1 = AttentionBlock(256, 3)
        self.pre2 = Block(256, 512, stride=2)
        self.attn2 = AttentionBlock(512, 2)
        self.pre3 = Block(512, 1024, stride=2)
        self.attn3 = AttentionBlock(1024, 1)
        self.pre4 = tnn.CondSeq(
            Block(1024, 2048, stride=2),
            Block(2048, 2048),
            Block(2048, 2048),
        )
Example #6
    def __init__(self,
                 ch: int,
                 inner: Optional[nn.Module],
                 with_skip: bool = True) -> None:
        super(UBlock, self).__init__()
        self.inner = inner
        if with_skip and inner is not None:
            self.skip = Block(ch, ch)
        else:
            self.skip = None
        self.encode = tnn.CondSeq(nn.MaxPool2d(3, 1, 1),
                                  nn.UpsamplingBilinear2d(scale_factor=0.5),
                                  Block(ch, ch))
        self.decode = tnn.CondSeq(Block(ch, ch),
                                  nn.UpsamplingBilinear2d(scale_factor=2))
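Again only __init__ is excerpted; a plausible hourglass forward (a sketch, assuming the skip bridges the two same-resolution endpoints):

    def forward(self, x):
        out = self.encode(x)          # downsample
        if self.inner is not None:
            out = self.inner(out)     # recurse into the nested UBlock
        out = self.decode(out)        # upsample back
        if self.skip is not None:
            out = out + self.skip(x)  # same-resolution skip branch
        return out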
Example #7
    def __init__(self, arch: list, num_classes: int) -> None:
        super().__init__()
        self.arch = arch
        in_ch = 3
        self.in_channels = in_ch

        feats = tnn.CondSeq()
        block_num = 1
        conv_num = 1
        for layer in arch:
            if layer == 'M':
                feats.add_module(f'pool_{block_num}', nn.MaxPool2d(2, 2))
                block_num += 1
                conv_num = 1
            else:
                ch = cast(int, layer)
                feats.add_module(
                    f'conv_{block_num}_{conv_num}',
                    tnn.ConvBlock(in_ch, ch, 3).remove_batchnorm())
                in_ch = ch
                conv_num += 1
        self.out_channels = ch

        self.features = feats
        self.classifier = ClassificationHead(self.out_channels, num_classes)
        self.classifier.to_vgg_style(4096)
Example #8
    def __init__(self, in_ch, hid, out_ch, quant_lvls, sz, n_layer=3):
        super(PixCNNBase, self).__init__()
        self.sz = sz
        self.lin = tnn.CondSeq(
            tnn.TopLeftConv2d(in_ch, hid, 5, center=False, bias=sz),
            nn.ReLU(inplace=True))

        sz2 = sz[0] // 2, sz[1] // 2
        sz4 = sz[0] // 4, sz[1] // 4
        self.l1 = nn.Sequential(
            *[ResBlk(hid, hid * 2, hid, 5, sz) for _ in range(n_layer)])
        self.l2 = nn.Sequential(
            *[ResBlk(hid, hid * 2, hid, 5, sz2) for _ in range(n_layer)])
        self.l3 = nn.Sequential(
            *[ResBlk(hid, hid * 2, hid, 5, sz4) for _ in range(n_layer)])
        self.l4 = nn.Sequential(
            *[ResBlk(hid, hid * 2, hid, 5, sz4) for _ in range(n_layer)])
        self.l5 = nn.Sequential(*[
            ResBlk(hid * 2, hid * 4, hid * 2, 5, sz2) for _ in range(n_layer)
        ])
        self.l6 = nn.Sequential(*[
            ResBlk(hid * 3, hid * 6, hid * 3, 5, sz) for _ in range(n_layer)
        ])

        self.lout = PixelPredictor(hid * 3, out_ch)
Example #9
    def __init__(self, arch: List[Union[str, int]]) -> None:
        super().__init__()
        self.arch = arch
        self.in_channels = 3
        in_ch = arch[0]

        features = tnn.CondSeq()
        assert isinstance(in_ch, int)
        features.add_module('input', tnn.Conv3x3(3, in_ch))

        ii = 0
        for i, (x, x2) in enumerate(zip(arch, arch[1:] + ['dummy'])):
            if x == 'D':
                continue

            downsample = x2 == 'D'
            assert isinstance(x, int)
            features.add_module(f'block_{ii}',
                                tnn.ResidualDiscrBlock(in_ch, x, downsample))
            in_ch = x
            ii += 1
        self.out_channels = in_ch
        features.add_module('final_relu', nn.LeakyReLU(0.2, True))
        assert isinstance(features.block_0, tnn.ResidualDiscrBlock)
        features.block_0.preact_skip()
        self.features = features

        self.classifier = ClassificationHead(self.out_channels, 1)
Example #10
    def to_standard_arch(self):
        self._modules.clear()
        arch = self.arch
        self.input = tnn.ConvBlock(3, int(arch[0]), 7)
        ch, i = int(arch[0]), 1

        ii = 0
        self.encode = tnn.CondSeq()
        while arch[i][0] == 'd':
            out_ch = int(arch[i][1:])
            self.encode.add_module(f'conv_{ii}',
                                   tnn.ConvBlock(ch, out_ch, 3, stride=2))
            ch = out_ch
            i += 1
            ii += 1

        ii = 0
        self.transform = tnn.CondSeq()
        while arch[i][0] == 'R':
            out_ch = int(arch[i][1:])
            self.transform.add_module(f'transform_{ii}',
                                      tnn.PreactResBlock(ch, out_ch))
            ch = out_ch
            i += 1
            ii += 1

        ii = 0
        self.decode = tnn.CondSeq()
        while i < len(arch) and arch[i][0] == 'u':
            out_ch = int(arch[i][1:])
            self.decode.add_module(
                f'out_conv_{ii}',
                tnn.ConvBlock(ch, out_ch, 3, stride=1).add_upsampling())
            ch = out_ch
            i += 1
            ii += 1
        self.to_rgb = tnn.ConvBlock(out_ch, 3, 7).remove_batchnorm()
        self.to_rgb.relu = nn.Sigmoid()

        def to_instance_norm(m):
            if isinstance(m, nn.BatchNorm2d):
                return nn.InstanceNorm2d(m.num_features, affine=True)
            if isinstance(m, nn.Conv2d):
                m.padding_mode = 'reflect'
            return m

        tnn.utils.edit_model(self, to_instance_norm)
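Reading the parser above, the arch list starts with a bare stem width, then 'd<ch>' strided encoder convs, 'R<ch>' residual transforms, and 'u<ch>' upsampling decoder convs. An illustrative spec (values are made up):

arch = ['64', 'd128', 'd256', 'R256', 'R256', 'u128', 'u64']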
Example #11
    def __init__(self, num_classes: int) -> None:
        super(Attention56Bone, self).__init__(
            OrderedDict([
                ('head',
                 tnn.CondSeq(tu.kaiming(tnn.Conv2d(3, 64, 7, stride=2)),
                             nn.ReLU(True), nn.MaxPool2d(3, 2, 1))),
                ('pre1', Block(64, 256)),
                ('attn1', AttentionBlock(256, 3)),
                ('pre2', Block(256, 512, stride=2)),
                ('attn2', AttentionBlock(512, 2)),
                ('pre3', Block(512, 1024, stride=2)),
                ('attn3', AttentionBlock(1024, 1)),
                ('pre4',
                 tnn.CondSeq(
                     Block(1024, 2048, stride=2),
                     Block(2048, 2048),
                     Block(2048, 2048),
                 )),
                ('classifier', ClassificationHead(2048, num_classes)),
            ]))
Example #12
def _parse_snres(arch, in_ch):
    blocks = []
    for x, x2 in zip(arch, arch[1:] + ['dummy']):
        if x == 'D':
            continue

        downsample = x2 == 'D'
        blocks.append(tnn.SNResidualDiscrBlock(in_ch, x, downsample))
        in_ch = x
    return tnn.CondSeq(*blocks), in_ch
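A 'D' entry marks the block preceding it as downsampling, so a spec interleaves channel counts with 'D' flags. An illustrative call (values are made up):

# Two downsampling blocks (3->64, 64->128) and one plain block (128->256).
blocks, out_ch = _parse_snres([64, 'D', 128, 'D', 256], in_ch=3)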
Example #13
    def __init__(self, in_ch, hid_ch, out_ch, ks, sz):
        super(ResBlk, self).__init__()
        self.go = tnn.CondSeq(
            nn.BatchNorm2d(in_ch),
            nn.ReLU(inplace=False),
            tnn.Conv1x1(in_ch, hid_ch),
            nn.BatchNorm2d(hid_ch),
            nn.ReLU(inplace=True),
            tnn.TopLeftConv2d(hid_ch, hid_ch, ks, center=True, bias=sz),
            nn.BatchNorm2d(hid_ch),
            nn.ReLU(inplace=True),
            tnn.Conv1x1(hid_ch, out_ch),
        )
Example #14
    def __init__(self, arch: List[str], num_classes: int) -> None:
        super().__init__()

        def parse(layer: str) -> List[int]:
            return [int(x) for x in layer.split(':')]

        self.arch = list(map(parse, arch))

        self.features = tnn.CondSeq()
        self.features.add_module('input', ResNetInput(3, self.arch[0][0]))

        self._change_block_type('basic')
        self.classifier = ClassificationHead(self.arch[-1][0], num_classes)
Example #15
    def __init__(self, arch: List[int], num_classes: int) -> None:
        super().__init__()
        self.arch = arch
        self.in_channels = 3
        self.out_channels = arch[-1]

        feats = tnn.CondSeq()
        feats.input = tnn.ConvBlock(3, arch[0], 3)

        encdec: nn.Module = tnn.ConvBlock(arch[-1], arch[-1] * 2, 3)
        for outer, inner in zip(arch[-2::-1], arch[:0:-1]):
            encdec = tnn.UBlock(outer, inner, encdec)
        feats.encoder_decoder = encdec
        self.features = feats
        assert isinstance(encdec.out_channels, int)
        self.classifier = tnn.ConvBlock(encdec.out_channels, num_classes,
                                        3).remove_batchnorm().no_relu()
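The zip walks the architecture from the inside out, so each pass wraps the current encoder-decoder in one more tnn.UBlock. For instance (illustrative values):

# With arch = [32, 64, 128] the loop builds, inside out:
#   UBlock(32, 64, UBlock(64, 128, ConvBlock(128, 256, 3)))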
Example #16
    def __init__(self, in_noise, out_ch, side_ch=1):
        super(VggImg2ImgGeneratorDebug, self).__init__()

        def make_block(in_ch, out_ch, **kwargs):
            return tnn.Conv2dNormReLU(
                in_ch,
                out_ch,
                norm=lambda out: tnn.Spade2d(out, side_ch, 64),
                **kwargs)

        self.net = tnn.CondSeq(
            kaiming(nn.Linear(in_noise, 128 * 16)), tnn.Reshape(128, 4, 4),
            nn.LeakyReLU(0.2, inplace=True),
            VggBNBone([128, 'U', 64, 'U', 32, 'U', 16],
                      in_ch=128,
                      block=make_block), xavier(tnn.Conv1x1(16, out_ch)),
            nn.Sigmoid())
Example #17
    def __init__(self, arch: List[int]) -> None:
        super().__init__()
        layers: List[nn.Module] = [
            tnn.ConvBlock(3, arch[0], kernel_size=4,
                          stride=2).remove_batchnorm().leaky()
        ]

        in_ch = arch[0]
        self.in_channels = in_ch
        for next_ch in arch[1:]:
            layers.append(
                tnn.ConvBlock(in_ch, next_ch, kernel_size=4, stride=2).leaky())
            in_ch = next_ch
        assert isinstance(layers[-1], tnn.ConvBlock)
        layers[-1].conv.stride = (1, 1)

        self.features = tnn.CondSeq(*layers)
        self.classifier = tnn.Conv2d(in_ch, 1, 4)
Example #18
    def __init__(self, in_noise, out_ch, num_classes):
        super(VggClassCondGeneratorDebug, self).__init__()

        def make_block(in_ch, out_ch, **kwargs):
            return tnn.Conv2dNormReLU(
                in_ch,
                out_ch,
                norm=lambda out: tnn.ConditionalBN2d(out, 64),
                **kwargs)

        self.emb = nn.Embedding(num_classes, 64)
        self.net = tnn.CondSeq(
            kaiming(nn.Linear(in_noise, 128 * 16)), tnn.Reshape(128, 4, 4),
            nn.LeakyReLU(0.2, inplace=True),
            VggBNBone([128, 'U', 64, 'U', 32, 'U', 16],
                      in_ch=128,
                      block=make_block), xavier(tnn.Conv1x1(16, out_ch)),
            nn.Sigmoid())
Example #19
def VggBNBone(arch, in_ch=3, leak=0, block=tnn.Conv2dBNReLU, debug=False):
    """
    Construct a VGG net

    How to specify a VGG architecture:

    It's a list of block specifications. Blocks are either:

    - 'M' for maxpool of kernel size 2 and stride 2
    - 'A' for average pool of kernel size 2 and stride 2
    - 'U' for nearest neighbors upsampling (scale factor 2)
    - an integer `ch` for a block with `ch` output channels

    Args:
        arch (list): architecture specification
        in_ch (int): number of input channels
        leak (float): leak in relus
        block (fn): block ctor

    Returns:
        The VGG feature extractor, as a tnn.CondSeq
    """
    layers = []

    if debug:
        layers.append(tnn.Debug('Input'))

    for i, layer in enumerate(arch):
        if layer == 'M':
            layers.append(nn.MaxPool2d(2, 2))
        elif layer == 'A':
            layers.append(nn.AvgPool2d(2, 2))
        elif layer == 'U':
            layers.append(nn.UpsamplingNearest2d(scale_factor=2))
        else:
            layers.append(block(in_ch, layer, ks=3, leak=leak))
            in_ch = layer
        if debug:
            layer_name = 'layer_{}_{}'.format(layers[-1].__class__.__name__, i)
            layers.append(tnn.Debug(layer_name))
    return tnn.CondSeq(*layers)
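Given the spec format documented in the docstring, a typical call looks like this (illustrative channel counts):

# Three conv stages separated by 2x2 max-pools.
net = VggBNBone([64, 'M', 128, 'M', 256], in_ch=3)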
Example #20
        def _build(i, prev_ch):
            ch = int(arch[i][1:])

            if arch[i][0] == 'R':
                transforms = tnn.CondSeq()
                transforms.in_channels = prev_ch
                ii = 0
                while arch[i][0] == 'R':
                    ch = int(arch[i][1:])
                    transforms.add_module(f'transform_{ii}',
                                          tnn.PreactResBlock(prev_ch, ch))
                    prev_ch = ch
                    i += 1
                    ii += 1
                transforms.out_channels = ch
                return transforms
            if arch[i][0] == 'd':
                u = tnn.encdec.UBlock(prev_ch, ch, _build(i + 1, ch))
                u.to_bilinear_sampling()
                u.set_decoder_num_layers(1).set_encoder_num_layers(1)
                return u
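So _build descends once per 'd<ch>' entry, wrapping the rest of the spec in a bilinear-sampling UBlock, and bottoms out on a run of 'R<ch>' residual transforms. An illustrative spec (hypothetical; note the inner while loop relies on the full source keeping `i` in bounds, which this excerpt does not show):

arch = ['d64', 'd128', 'R128', 'R128']  # two nested UBlocks around two PreactResBlocks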
Example #21
    def _change_block_type(self, ty: str) -> None:
        arch = self.arch

        feats = tnn.CondSeq()
        assert isinstance(self.features.input, ResNetInput)
        feats.add_module('input', self.features.input)
        in_ch = arch[0][0]
        self.in_channels = in_ch

        for i, (ch, s) in enumerate(arch):
            feats.add_module(f'block_{i}',
                             self._make_block(ty, in_ch, ch, stride=s))
            in_ch = ch
        self.out_channels = ch

        if 'preact' in ty:
            assert isinstance(feats.block_0, PREACT_BLOCKS)
            feats.block_0.no_preact()
            feats.add_module('final_bn', nn.BatchNorm2d(self.out_channels))
            feats.add_module('final_relu', nn.ReLU(True))
        self.features = feats
Example #22
def ResNetBone(arch, head, block, in_ch=3, debug=False):
    """
    Construct a ResNet

    How to specify an architecture:

    It's a list of block specifications. Each element is a string of the form
    "output channels:stride". For instance "64:2" is a block with input stride
    2 and 64 output channels.

    Args:
        arch (list): the architecture specification
        head (fn): ctor for the first conv (stem) module
        block (fn): ctor for the residual block to use
        in_ch (int): number of input channels, 3 for RGB images
        debug (bool): whether to insert debug layers after each layer

    Returns:
        The ResNet trunk, as a tnn.CondSeq
    """
    def parse(spec):
        return [int(x) for x in spec.split(':')]

    layers = []

    if debug:
        layers.append(tnn.Debug('Input'))

    ch, s = parse(arch[0])
    layers.append(head(in_ch, ch, 7, stride=s))
    if debug:
        layers.append(tnn.Debug('Head'))
    in_ch = ch
    for i, (ch, s) in enumerate(map(parse, arch[1:])):
        layers.append(block(in_ch, ch, stride=s))
        in_ch = ch
        if debug:
            layer_name = 'layer_{}_{}'.format(layers[-1].__class__.__name__, i)
            layers.append(tnn.Debug(layer_name))
    return tnn.CondSeq(*layers)
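Following the "output channels:stride" spec described in the docstring, a call might look like this (illustrative; the head and block ctors just need the signatures used above):

net = ResNetBone(['64:2', '64:1', '128:2', '256:2'],
                 head=tnn.ConvBlock,        # assumed head(in_ch, ch, 7, stride=s)
                 block=tnn.PreactResBlock)  # assumed block(in_ch, ch, stride=s)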
Example #23
def base_patch_discr(arch, in_ch=3, out_ch=1, norm=None):
    def block(in_ch, out_ch, norm):
        if norm is None:
            return [
                kaiming(nn.Conv2d(in_ch, out_ch, 4, stride=2, padding=1),
                        a=0.2),
                nn.LeakyReLU(0.2, inplace=True)
            ]
        else:
            return [
                kaiming(nn.Conv2d(in_ch, out_ch, 4, stride=2, padding=1),
                        a=0.2),
                norm(out_ch),
                nn.LeakyReLU(0.2, inplace=True)
            ]

    layers = block(in_ch, arch[0], None)

    in_ch = arch[0]
    for out_ch in arch[1:]:
        layers += block(in_ch, out_ch, norm)
        in_ch = out_ch

    return tnn.CondSeq(*layers)
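The first block is never normalized, and `norm` (when given) is applied to every later block, which is the usual PatchGAN recipe. An illustrative call:

trunk = base_patch_discr([64, 128, 256], in_ch=3, norm=nn.BatchNorm2d)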
Example #24
    def __init__(self, ch):
        super(UBlock1, self).__init__()
        self.inner = tnn.CondSeq(nn.MaxPool2d(3, 1, 1),
                                 nn.UpsamplingBilinear2d(scale_factor=0.5),
                                 Block(ch, ch),
                                 nn.UpsamplingBilinear2d(scale_factor=2))