def __init__(self, latent_size, scale=0.01):
    """Stack of three 1x1 equalized convs, each followed by ReLU.

    Channel count stays at ``latent_size`` throughout; ``scale`` is
    forwarded to every ``ConvEq``.
    """
    # Build the repeated conv/ReLU pairs once, then hand them to the
    # sequential base class via unpacking.
    stages = []
    for _ in range(3):
        stages.append(ConvEq(latent_size, latent_size, 1, scale=scale))
        stages.append(ReLU())
    super().__init__(*stages)
def __init__(self, in_channels, out_channels, latent_size):
    """First synthesis stage: 4x4 transposed equalized conv, then
    per-channel noise, ReLU, and adaptive instance norm driven by the
    latent of size ``latent_size``."""
    super().__init__()
    self.conv = ConvTransposeEq(in_channels, out_channels, 4)
    self.noise = AdditiveNoise(out_channels)
    self.relu = ReLU()
    self.norm = AdaptiveInstanceNorm(out_channels, latent_size)
def __init__(self, in_channels, out_channels, latent_size, upsample=False):
    """Synthesis block: optional 2x spatial upsample, 3x3 equalized conv,
    additive noise, ReLU, and latent-conditioned AdaIN."""
    super().__init__()
    # NoOp acts as the identity when no upsampling is requested.
    if upsample:
        self.upsample = Upsample(scale_factor=2)
    else:
        self.upsample = NoOp()
    self.conv = ConvEq(in_channels, out_channels, 3, padding=1)
    self.noise = AdditiveNoise(out_channels)
    self.relu = ReLU()
    self.norm = AdaptiveInstanceNorm(out_channels, latent_size)
def __init__(self, in_channels, out_channels, latent_size):
    """Constant-input block: a learned 1 x out_channels x 4 x 4 tensor
    (initialized to ones), followed by noise, ReLU, and AdaIN.

    ``in_channels`` is unused here; it is kept so the signature matches
    the other block constructors in this file.
    """
    super().__init__()
    # torch.ones replaces the original uninitialized torch.Tensor(...)
    # followed by init.constant_(..., 1.0) — same result, no window in
    # which the parameter holds garbage values.
    self.input = nn.Parameter(torch.ones(1, out_channels, 4, 4))
    self.noise = AdditiveNoise(out_channels)
    self.relu = ReLU()
    self.norm = AdaptiveInstanceNorm(out_channels, latent_size)
def build_level_layers(level, base_channels=16):
    """Build the conv stack and the RGB input adapter for one level.

    Channel widths halve as ``level`` grows; level 0 ends in a
    ``ZeroBlock``, every other level downsamples into the next width.

    NOTE(review): reads ``max_pow`` from the enclosing scope — confirm it
    is defined where this function is created.
    """
    ch_in = base_channels * 2 ** (max_pow - level - 1)
    ch_out = base_channels * 2 ** (max_pow - level)
    if level == 0:
        conv = nn.Sequential(MidBlock(ch_in, ch_in), ZeroBlock(ch_in))
    else:
        conv = nn.Sequential(
            MidBlock(ch_in, ch_in),
            MidBlock(ch_in, ch_out, downsample=True),
        )
    # 1x1 conv lifting 3-channel RGB into this level's feature width.
    from_rgb = nn.Sequential(ConvEq(3, ch_in, 1), ReLU())
    # Keyword args preserve insertion order ("conv" before "from_rgb").
    return nn.ModuleDict(OrderedDict(conv=conv, from_rgb=from_rgb))
def __init__(self, in_channels, out_channels, downsample=False):
    """3x3 equalized conv + ReLU, optionally halving the spatial size.

    Downsampling is expressed as an ``Upsample`` with scale_factor=0.5
    (an interpolation-based shrink); otherwise ``NoOp`` is the identity.
    """
    resize = Upsample(scale_factor=0.5) if downsample else NoOp()
    super().__init__(
        ConvEq(in_channels, out_channels, 3, padding=1),
        ReLU(),
        resize,
    )
def __init__(self, in_channels):
    """Final head: a 4x4 conv collapsing the spatial map, ReLU, then a
    1x1 conv reducing to a single output channel (score)."""
    layers = (
        ConvEq(in_channels, in_channels, 4),
        ReLU(),
        ConvEq(in_channels, 1, 1),
    )
    super().__init__(*layers)