예제 #1
0
    def __init__(self,
                 arch: List[int],
                 n_skip_max: int = 2,
                 in_noise: int = 256,
                 out_ch: int = 3,
                 batchnorm_in_output: bool = False) -> None:
        """Assemble the generator: a noise projection, a stack of AutoGAN
        blocks wired with skip connections, and a to-RGB head.

        Args:
            arch: channel widths; ``arch[0]`` is the width of the initial
                4x4 feature map, the rest drive one gen block each.
            n_skip_max: maximum number of skip connections fed to a block.
            in_noise: dimensionality of the input latent vector.
            out_ch: output image channels.
            batchnorm_in_output: prepend a BatchNorm to the to-RGB head.
        """
        super().__init__()
        self.n_skip_max = n_skip_max
        # Project the latent vector to a 4x4 map with arch[0] channels.
        self.make_noise = xavier(nn.Linear(in_noise, 4 * 4 * arch[0]))

        gen_blocks = []
        skip_chs: List[int] = []
        prev_ch = arch[0]
        for idx, width in enumerate(arch[1:]):
            # Alternate the upsampling mode between consecutive blocks.
            interp = 'bilinear' if idx % 2 else 'nearest'
            gen_blocks.append(
                tnn.AutoGANGenBlock(prev_ch, width, skip_chs, mode=interp))
            # Remember at most n_skip_max of the most recent block widths
            # so later blocks know the channel counts of their skip inputs.
            skip_chs = ([width] + skip_chs)[:n_skip_max]
            prev_ch = width
        self.blocks = nn.ModuleList(gen_blocks)

        head = [nn.ReLU(True), xavier(tnn.Conv3x3(arch[-1], out_ch))]
        if batchnorm_in_output:
            head.insert(0, nn.BatchNorm2d(arch[-1]))
        self.to_rgb = nn.Sequential(*head)
예제 #2
0
    def __init__(self, arch: List[Union[str, int]]) -> None:
        """Build a residual discriminator from an architecture spec.

        Args:
            arch: a list mixing channel counts (ints) and the marker
                ``'D'``; a ``'D'`` entry means the block built from the
                *preceding* int downsamples.
        """
        super().__init__()
        self.arch = arch
        self.in_channels = 3
        in_ch = arch[0]

        features = tnn.CondSeq()
        assert isinstance(in_ch, int)
        features.add_module('input', tnn.Conv3x3(3, in_ch))

        ii = 0
        # Pad with a sentinel so each entry can peek at its successor;
        # the enumerate index was unused, so iterate the pairs directly.
        for x, x2 in zip(arch, arch[1:] + ['dummy']):
            if x == 'D':
                # 'D' only annotates the previous block; it is not a block.
                continue

            # A block downsamples when immediately followed by 'D'.
            downsample = x2 == 'D'
            assert isinstance(x, int)
            features.add_module(f'block_{ii}',
                                tnn.ResidualDiscrBlock(in_ch, x, downsample))
            in_ch = x
            ii += 1
        self.out_channels = in_ch
        features.add_module('final_relu', nn.LeakyReLU(0.2, True))
        assert isinstance(features.block_0, tnn.ResidualDiscrBlock)
        # NOTE(review): presumably switches the first block to a
        # pre-activation skip path — confirm against tnn.ResidualDiscrBlock.
        features.block_0.preact_skip()
        self.features = features

        self.classifier = ClassificationHead(self.out_channels, 1)
예제 #3
0
    def __init__(self, in_ch, num_classes, B=0):
        """Assemble an EfficientNet-style stack compound-scaled by ``B``.

        Args:
            in_ch: number of input image channels.
            num_classes: size of the final classification layer.
            B: compound scaling coefficient (0 = baseline EfficientNet-B0).
        """

        def width(base):
            # Width scaling, rounded down to a multiple of 8.
            return int(base * 1.1**B) // 8 * 8

        def depth(base):
            # Depth (layer-count) scaling.
            return int(math.ceil(base * 1.2**B))

        def resolution():
            # Resolution scaling (unused: input resizing is commented out).
            return int(224 * 1.15**B)

        layers = [
            # Stage 1: stem
            # nn.UpsamplingBilinear2d(size=(resolution(), resolution())),
            tu.kaiming(tnn.Conv3x3(in_ch, width(32), stride=2, bias=False)),
            nn.BatchNorm2d(width(32)),
            tnn.HardSwish(),
            # Stage 2
            MBConv(width(32), width(16), 3, mul_factor=1),
        ]
        layers += [
            MBConv(width(16), width(16), 3, mul_factor=1)
            for _ in range(depth(1) - 1)
        ]

        # Stage 3
        layers.append(MBConv(width(16), width(24), 3, stride=2))
        layers += [
            MBConv(width(24), width(24), 3) for _ in range(depth(2) - 1)
        ]

        # Stage 4
        layers.append(MBConv(width(24), width(40), 5, stride=2))
        layers += [
            MBConv(width(40), width(40), 5) for _ in range(depth(2) - 1)
        ]

        # Stage 5
        layers.append(MBConv(width(40), width(80), 3, stride=2))
        layers += [
            MBConv(width(80), width(80), 3) for _ in range(depth(3) - 1)
        ]

        # Stage 6
        layers.append(MBConv(width(80), width(112), 5))
        layers += [
            MBConv(width(112), width(112), 5) for _ in range(depth(3) - 1)
        ]

        # Stage 7
        layers.append(MBConv(width(112), width(192), 5, stride=2))
        layers += [
            MBConv(width(192), width(192), 5) for _ in range(depth(4) - 1)
        ]

        # Stage 8: last MBConvs plus the classification head.
        layers.append(MBConv(width(192), width(320), 3))
        layers += [
            MBConv(width(320), width(320), 3) for _ in range(depth(1) - 1)
        ]
        layers += [
            tu.kaiming(tnn.Conv1x1(width(320), width(1280), bias=False)),
            nn.BatchNorm2d(width(1280)),
            tnn.HardSwish(),
            nn.AdaptiveAvgPool2d(1),
            tnn.Reshape(-1),
            tu.xavier(nn.Linear(width(1280), num_classes)),
        ]
        super(EfficientNet, self).__init__(*layers)
예제 #4
0
    def __init__(self, arch, n_skip_max=2, in_noise=256, out_ch=3):
        """Build the AutoGAN generator: a noise projection, a stack of gen
        blocks with skip connections, and a BN-ReLU-conv to-RGB head.

        Args:
            arch: channel widths; ``arch[0]`` is the width of the initial
                4x4 feature map, the rest drive one gen block each.
            n_skip_max: maximum number of skip connections fed to a block.
            in_noise: dimensionality of the input latent vector.
            out_ch: output image channels.
        """
        super(AutoGAN, self).__init__()
        self.n_skip_max = n_skip_max
        # Project the latent vector to a 4x4 map with arch[0] channels.
        self.make_noise = nn.Linear(in_noise, 4 * 4 * arch[0])

        gen_blocks = []
        skip_chs = []
        prev_ch = arch[0]
        for idx, width in enumerate(arch[1:]):
            # Alternate the upsampling mode between consecutive blocks.
            interp = 'bilinear' if idx % 2 else 'nearest'
            gen_blocks.append(
                tnn.AutoGANGenBlock(prev_ch, width, skip_chs, mode=interp))
            # Track at most n_skip_max recent widths for skip inputs.
            skip_chs = ([width] + skip_chs)[:n_skip_max]
            prev_ch = width
        self.blocks = nn.ModuleList(gen_blocks)
        self.to_rgb = nn.Sequential(
            nn.BatchNorm2d(arch[-1]),
            nn.ReLU(True),
            xavier(tnn.Conv3x3(arch[-1], out_ch)))
예제 #5
0
 def set_input_specs(self, in_channels: int) -> 'ResidualDiscriminator':
     """Swap the stem conv so the network accepts ``in_channels`` inputs.

     Returns self for chaining.
     """
     # Read the old conv's width before replacing it.
     out_ch = self.features.input.out_channels
     self.features.input = tnn.Conv3x3(in_channels, out_ch)
     return self
예제 #6
0
 def set_input_specs(self, in_channels: int) -> 'VGG':
     """Replace the first conv so the network accepts ``in_channels`` inputs.

     Returns self for chaining.

     Fix: the previous call passed a stray third positional ``3`` to
     tnn.Conv3x3; its kernel size is fixed at 3x3 (the sibling
     set_input_specs calls it with two args), so the extra argument most
     likely landed on stride and silently downsampled the input by 3x.
     """
     c1 = self.features.conv_1_1
     assert isinstance(c1, tnn.ConvBlock)
     c1.conv = kaiming(tnn.Conv3x3(in_channels, c1.conv.out_channels))
     return self