Example #1
    def __init__(self,
                 arch: List[int],
                 n_skip_max: int = 2,
                 in_noise: int = 256,
                 out_ch: int = 3,
                 batchnorm_in_output: bool = False) -> None:
        super().__init__()
        self.n_skip_max = n_skip_max
        # Project the latent vector to a 4x4 seed feature map.
        self.make_noise = xavier(nn.Linear(in_noise, 4 * 4 * arch[0]))

        in_ch = arch[0]
        blocks = []
        lasts: List[int] = []
        for i, out in enumerate(arch[1:]):
            # Alternate upsampling modes between consecutive blocks.
            mode = 'nearest' if i % 2 == 0 else 'bilinear'
            blocks.append(tnn.AutoGANGenBlock(in_ch, out, lasts, mode=mode))
            # Remember the channel counts of up to n_skip_max previous
            # blocks; the next block takes their outputs as skip inputs.
            lasts = ([out] + lasts)[:n_skip_max]
            in_ch = out
        self.blocks = nn.ModuleList(blocks)
        if batchnorm_in_output:
            self.to_rgb = nn.Sequential(nn.BatchNorm2d(arch[-1]),
                                        nn.ReLU(True),
                                        xavier(tnn.Conv3x3(arch[-1], out_ch)))
        else:
            self.to_rgb = nn.Sequential(nn.ReLU(True),
                                        xavier(tnn.Conv3x3(arch[-1], out_ch)))
Example #2
    def __init__(self, in_ch, out_ch, ks, stride=1, mul_factor=6):
        super(MBConv, self).__init__()
        self.in_ch = in_ch
        self.out_ch = out_ch
        self.ks = ks
        self.stride = stride
        self.factor = mul_factor

        hid = in_ch * mul_factor
        self.branch = tnn.CondSeq(
            tu.xavier(tnn.Conv1x1(in_ch, hid, bias=False)),
            nn.BatchNorm2d(hid), tnn.HardSwish(),
            tu.xavier(
                nn.Conv2d(hid,
                          hid,
                          ks,
                          stride=stride,
                          padding=ks // 2,
                          groups=hid,
                          bias=False)), nn.BatchNorm2d(hid), tnn.HardSwish(),
            tnn.SEBlock(hid, reduction=4), tu.xavier(tnn.Conv1x1(hid, out_ch)),
            nn.BatchNorm2d(out_ch))

        # Identity-like shortcut: average-pool when the branch strides,
        # and project with a 1x1 conv when the channel count changes.
        self.shortcut = tnn.CondSeq()

        if stride != 1:
            self.shortcut.add_module(
                'pool', nn.AvgPool2d(stride, stride, ceil_mode=True))

        if in_ch != out_ch:
            self.shortcut.add_module('conv',
                                     tnn.Conv1x1(in_ch, out_ch, bias=False))
            self.shortcut.add_module('bn', nn.BatchNorm2d(out_ch))
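
A hedged smoke test for MBConv; that forward() sums self.branch(x) and self.shortcut(x) is an assumption, since only __init__ is shown:

import torch

block = MBConv(16, 32, 3, stride=2)
x = torch.randn(4, 16, 32, 32)
print(block(x).shape)  # expected: torch.Size([4, 32, 16, 16])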
Example #3
    def __init__(self,
                 in_ch: int,
                 out_ch: int,
                 noise_size: int,
                 upsample: bool = False,
                 n_layers: int = 2,
                 equal_lr: bool = True):
        super().__init__()
        self.upsample_mode = 'bilinear'
        self.equal_lr = equal_lr
        dyn = equal_lr
        self.upsample = upsample
        inside = ModuleGraph(outputs=f'in_{n_layers}')

        for i in range(n_layers):
            conv = ModulatedConv(in_ch,
                                 noise_size,
                                 out_ch,
                                 kernel_size=3,
                                 padding=1,
                                 bias=True)
            kaiming(conv, dynamic=dyn, a=0.2)
            inside.add_operation(inputs=[f'in_{i}', 'w'],
                                 outputs=[f'conv_{i}'],
                                 name=f'conv_{i}',
                                 operation=conv)

            noise = Noise(out_ch, inplace=True, bias=False)
            inside.add_operation(inputs=[f'conv_{i}', f'noise_{i}'],
                                 operation=noise,
                                 name=f'plus_noise_{i}',
                                 outputs=[f'plus_noise_{i}'])

            inside.add_operation(inputs=[f'plus_noise_{i}'],
                                 operation=nn.LeakyReLU(0.2, True),
                                 outputs=[f'in_{i+1}'],
                                 name=f'relu_{i+1}')

            in_ch = out_ch

        if dyn:
            # With equalized learning rate enabled, re-initialize the
            # style projection of every ModulatedConv accordingly.
            for m in inside:
                if isinstance(m, ModulatedConv):
                    xavier(m.make_s, dynamic=True)

        self.inside = inside
        self.to_rgb = xavier(ModulatedConv(out_ch,
                                           noise_size,
                                           3,
                                           kernel_size=1,
                                           padding=0,
                                           bias=True,
                                           demodulate=False),
                             dynamic=dyn,
                             a=0.2)
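
For n_layers=2 the graph above evaluates in_0 -> conv_0(., w) -> plus_noise_0 -> LeakyReLU -> in_1, then repeats with conv_1 and noise_1 to produce in_2, which outputs=f'in_{n_layers}' declares as the block output; to_rgb is applied outside this graph.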
Example #4
    def __init__(self,
                 channels,
                 cond_channels,
                 hidden,
                 size=None,
                 momentum=0.8):
        super(Spade2d, self).__init__(channels, momentum)
        # Downsample the conditioning map, then predict a spatial
        # scale and bias per normalized channel (SPADE).
        self.initial = kaiming(Conv3x3(cond_channels, hidden, stride=2))
        self.make_weight = xavier(Conv3x3(hidden, channels))
        self.make_bias = xavier(Conv3x3(hidden, channels))
        self.size = size
Example #5
    def to_spectral_norm(self) -> 'ResidualDiscrBlock':
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                try:
                    # Drop any existing weight scaling before swapping in
                    # spectral norm; ValueError means none was applied.
                    remove_weight_scale(m)
                except ValueError:
                    pass
                nn.utils.spectral_norm(m)

        assert isinstance(self.branch.conv2, nn.Conv2d)
        xavier(self.branch.conv2, a=0.2)
        return self
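
A hedged usage sketch; the constructor arguments of ResidualDiscrBlock are assumptions. After nn.utils.spectral_norm the reparametrized conv carries a weight_orig parameter, which makes the conversion easy to verify:

block = ResidualDiscrBlock(64, 128).to_spectral_norm()
assert hasattr(block.branch.conv2, 'weight_orig')  # spectral norm in place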
Example #6
    def __init__(self, in_ch, reduction=16):
        super(SEBlock, self).__init__()
        self.proj = nn.Sequential(
            collections.OrderedDict([
                ('pool', nn.AdaptiveAvgPool2d(1)),
                ('squeeze', kaiming(nn.Conv2d(in_ch, in_ch // reduction, 1))),
                ('relu', nn.ReLU(True)),
                ('excite', xavier(nn.Conv2d(in_ch // reduction, in_ch, 1))),
                ('attn', HardSigmoid())
            ]))
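
A hedged smoke test; that forward() multiplies the input by the self.proj gate is an assumption, since only __init__ is shown:

import torch

se = SEBlock(64, reduction=16)
x = torch.randn(2, 64, 32, 32)
print(se(x).shape)  # expected: torch.Size([2, 64, 32, 32]), channels rescaled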
Example #7
    def __init__(self,
                 in_channels: int,
                 noise_channels: int,
                 *args,
                 demodulate: bool = True,
                 **kwargs):
        super(ModulatedConv, self).__init__(in_channels, *args, **kwargs)
        # make_s maps the style vector to per-channel scales; the bias
        # starts at 1 so the modulation is initially close to identity.
        self.make_s = tu.xavier(nn.Linear(noise_channels, in_channels))
        self.make_s.bias.data.fill_(1)
        self.demodulate = demodulate
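
A hedged call sketch; the forward signature (x, w) is inferred from the wiring in Example #3 (inputs=[f'in_{i}', 'w']), not from this fragment:

import torch

conv = ModulatedConv(64, 512, 128, kernel_size=3, padding=1)
x = torch.randn(2, 64, 16, 16)
w = torch.randn(2, 512)  # style vector driving the modulation
print(conv(x, w).shape)  # expected: torch.Size([2, 128, 16, 16])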
Example #8
    def __init__(self, feat_extractor, feature_size, num_classes):
        super(Classifier, self).__init__()
        self.bone = feat_extractor

        self.head = nn.Sequential(
            nn.AdaptiveMaxPool2d(1),
            tnn.Reshape(feature_size),
            kaiming(nn.Linear(feature_size, feature_size)),
            nn.ReLU(inplace=True),
            xavier(nn.Linear(feature_size, num_classes)),
        )
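
A hedged smoke test with a toy backbone; that forward() chains self.bone and self.head is an assumption, since only __init__ is shown:

import torch
import torch.nn as nn

bone = nn.Sequential(nn.Conv2d(3, 32, 3, padding=1), nn.ReLU(inplace=True))
clf = Classifier(bone, feature_size=32, num_classes=10)
print(clf(torch.randn(2, 3, 16, 16)).shape)  # expected: torch.Size([2, 10])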
Example #9
    def __init__(self, in_ch, num_classes, B=0):

        def ch(ch):
            # Width scaling: round down to a multiple of 8.
            return int(ch * 1.1**B) // 8 * 8

        def n_layers(d):
            # Depth scaling.
            return int(math.ceil(d * 1.2**B))

        def r():
            # Resolution scaling.
            return int(224 * 1.15**B)

        super(EfficientNet, self).__init__(
            # Stage 1
            # nn.UpsamplingBilinear2d(size=(r(), r())),
            tu.kaiming(tnn.Conv3x3(in_ch, ch(32), stride=2, bias=False)),
            nn.BatchNorm2d(ch(32)),
            tnn.HardSwish(),

            # Stage 2
            MBConv(ch(32), ch(16), 3, mul_factor=1),
            *[
                MBConv(ch(16), ch(16), 3, mul_factor=1)
                for _ in range(n_layers(1) - 1)
            ],

            # Stage 3
            MBConv(ch(16), ch(24), 3, stride=2),
            *[MBConv(ch(24), ch(24), 3) for _ in range(n_layers(2) - 1)],

            # Stage 4
            MBConv(ch(24), ch(40), 5, stride=2),
            *[MBConv(ch(40), ch(40), 5) for _ in range(n_layers(2) - 1)],

            # Stage 5
            MBConv(ch(40), ch(80), 3, stride=2),
            *[MBConv(ch(80), ch(80), 3) for _ in range(n_layers(3) - 1)],

            # Stage 6
            MBConv(ch(80), ch(112), 5),
            *[MBConv(ch(112), ch(112), 5) for _ in range(n_layers(3) - 1)],

            # Stage 7
            MBConv(ch(112), ch(192), 5, stride=2),
            *[MBConv(ch(192), ch(192), 5) for _ in range(n_layers(4) - 1)],

            # Stage 8
            MBConv(ch(192), ch(320), 3),
            *[MBConv(ch(320), ch(320), 3) for _ in range(n_layers(1) - 1)],
            tu.kaiming(tnn.Conv1x1(ch(320), ch(1280), bias=False)),
            nn.BatchNorm2d(ch(1280)),
            tnn.HardSwish(),
            nn.AdaptiveAvgPool2d(1),
            tnn.Reshape(-1),
            tu.xavier(nn.Linear(ch(1280), num_classes)))
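
A hedged instantiation of the model above, assuming the parent class chains the layers it receives sequentially. B=0 is the unscaled base model, and 224 matches r() at B=0, though the upsampling line that would enforce that resolution is commented out:

import torch

net = EfficientNet(in_ch=3, num_classes=1000, B=0)
print(net(torch.randn(1, 3, 224, 224)).shape)  # expected: torch.Size([1, 1000])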
Example #10
def VggGeneratorDebug(in_noise=32, out_ch=3):
    """
    A not-so-small VGG-style image GAN generator, for testing purposes.

    Args:
        in_noise (int): dimension of the input noise
        out_ch (int): number of output channels (3 for RGB images)

    Returns:
        an nn.Sequential generator instance
    """
    return nn.Sequential(
        kaiming(nn.Linear(in_noise, 128 * 16)), tnn.Reshape(128, 4, 4),
        nn.LeakyReLU(0.2, inplace=True),
        VggBNBone([128, 'U', 64, 'U', 32, 'U', 16], in_ch=128),
        xavier(tnn.Conv1x1(16, out_ch)), nn.Sigmoid())
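
The factory above is self-contained, so usage is direct; assuming each 'U' entry in VggBNBone doubles the resolution, the 4x4 seed comes out at 32x32:

import torch

g = VggGeneratorDebug(in_noise=32, out_ch=3)
print(g(torch.randn(16, 32)).shape)  # expected: torch.Size([16, 3, 32, 32])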
Example #11
    def __init__(self, arch, n_skip_max=2, in_noise=256, out_ch=3):
        super(AutoGAN, self).__init__()
        self.n_skip_max = n_skip_max
        self.make_noise = nn.Linear(in_noise, 4 * 4 * arch[0])

        in_ch = arch[0]
        blocks = []
        lasts = []
        for i, out in enumerate(arch[1:]):
            mode = 'nearest' if i % 2 == 0 else 'bilinear'
            blocks.append(tnn.AutoGANGenBlock(in_ch, out, lasts, mode=mode))
            lasts = ([out] + lasts)[:n_skip_max]
            in_ch = out
        self.blocks = nn.ModuleList(blocks)
        self.to_rgb = nn.Sequential(nn.BatchNorm2d(arch[-1]), nn.ReLU(True),
                                    xavier(tnn.Conv3x3(arch[-1], out_ch)))
Example #12
    def __init__(self, in_noise, out_ch, side_ch=1):
        super(VggImg2ImgGeneratorDebug, self).__init__()

        def make_block(in_ch, out_ch, **kwargs):
            return tnn.Conv2dNormReLU(
                in_ch,
                out_ch,
                norm=lambda out: tnn.Spade2d(out, side_ch, 64),
                **kwargs)

        self.net = tnn.CondSeq(
            kaiming(nn.Linear(in_noise, 128 * 16)), tnn.Reshape(128, 4, 4),
            nn.LeakyReLU(0.2, inplace=True),
            VggBNBone([128, 'U', 64, 'U', 32, 'U', 16],
                      in_ch=128,
                      block=make_block), xavier(tnn.Conv1x1(16, out_ch)),
            nn.Sigmoid())
Example #13
    def __init__(self, in_noise, out_ch, num_classes):
        super(VggClassCondGeneratorDebug, self).__init__()

        def make_block(in_ch, out_ch, **kwargs):
            return tnn.Conv2dNormReLU(
                in_ch,
                out_ch,
                norm=lambda out: tnn.ConditionalBN2d(out, 64),
                **kwargs)

        self.emb = nn.Embedding(num_classes, 64)
        self.net = tnn.CondSeq(
            kaiming(nn.Linear(in_noise, 128 * 16)), tnn.Reshape(128, 4, 4),
            nn.LeakyReLU(0.2, inplace=True),
            VggBNBone([128, 'U', 64, 'U', 32, 'U', 16],
                      in_ch=128,
                      block=make_block), xavier(tnn.Conv1x1(16, out_ch)),
            nn.Sigmoid())
Example #14
    def __init__(self, channels, cond_channels, hidden, momentum=0.8):
        super(Spade2d, self).__init__(channels, momentum)
        self.initial = kaiming(Conv3x3(cond_channels, hidden, stride=2))
        self.make_weight = xavier(Conv3x3(hidden, channels))
        self.make_bias = xavier(Conv3x3(hidden, channels))
        self.register_buffer('weight', torch.ones(channels))