def __init__(self, depth=7, latent_size=512, use_eql=True):
    """
    constructor for the Generator class
    :param depth: required depth of the Network
    :param latent_size: size of the latent manifold
    :param use_eql: whether to use equalized learning rate
    """
    super().__init__()

    assert latent_size != 0 and ((latent_size & (latent_size - 1)) == 0), \
        "latent size not a power of 2"
    if depth >= 4:
        assert latent_size >= np.power(2, depth - 4), \
            "latent size will diminish to zero"

    # state of the generator:
    self.use_eql = use_eql
    self.depth = depth
    self.latent_size = latent_size

    # fully-connected pre-processing block (hard-coded to 512 units,
    # i.e. the default latent_size)
    self.fc = nn.Sequential(
        nn.Linear(512, 512),
        nn.BatchNorm1d(512),
        nn.ReLU(),
    )

    # register the modules required for the GAN
    self.initial_block = GenInitialBlock(self.latent_size, use_eql=self.use_eql)

    # create a module list of the other required general convolution blocks
    self.layers = ModuleList([])  # initialize to empty list

    # create the ToRGB layers for various outputs:
    if self.use_eql:
        self.toRGB = lambda in_channels: \
            _equalized_conv2d(in_channels, 3, (1, 1), bias=True)
    else:
        self.toRGB = lambda in_channels: Conv2d(in_channels, 3, (1, 1), bias=True)

    self.rgb_converters = ModuleList([self.toRGB(self.latent_size)])

    # create the remaining layers
    for i in range(self.depth - 1):
        if i <= 2:
            layer = GenGeneralConvBlock(self.latent_size, self.latent_size,
                                        use_eql=self.use_eql)
            rgb = self.toRGB(self.latent_size)
        else:
            layer = GenGeneralConvBlock(
                int(self.latent_size // np.power(2, i - 3)),
                int(self.latent_size // np.power(2, i - 2)),
                use_eql=self.use_eql)
            rgb = self.toRGB(int(self.latent_size // np.power(2, i - 2)))

        self.layers.append(layer)
        self.rgb_converters.append(rgb)

    # register the temporary upsampler
    self.temporaryUpsampler = lambda x: interpolate(x, scale_factor=2)
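For reference, here is a minimal sketch (not part of the class; it assumes the default depth=7 and latent_size=512) that reproduces the channel arithmetic of the loop above: the first three GenGeneralConvBlocks keep latent_size channels, and every later block halves the channel count.

import numpy as np

def generator_channel_plan(depth=7, latent_size=512):
    # Hypothetical helper: recomputes the (in_channels, out_channels)
    # pairs that the generator constructor assigns to each block.
    plan = []
    for i in range(depth - 1):
        if i <= 2:
            in_ch, out_ch = latent_size, latent_size
        else:
            in_ch = int(latent_size // np.power(2, i - 3))
            out_ch = int(latent_size // np.power(2, i - 2))
        plan.append((in_ch, out_ch))
    return plan

# generator_channel_plan() ->
# [(512, 512), (512, 512), (512, 512), (512, 256), (256, 128), (128, 64)]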
def __init__(self, height=7, feature_size=512, use_eql=True):
    """
    constructor for the Discriminator class
    :param height: total height of the discriminator
                   (must be equal to the Generator depth)
    :param feature_size: size of the deepest features extracted
                         (must be equal to the Generator latent_size)
    :param use_eql: whether to use equalized learning rate
    """
    super().__init__()

    assert feature_size != 0 and ((feature_size & (feature_size - 1)) == 0), \
        "feature size not a power of 2"
    if height >= 4:
        assert feature_size >= np.power(2, height - 4), \
            "feature size cannot be produced"

    # create state of the object
    self.use_eql = use_eql
    self.height = height
    self.feature_size = feature_size

    self.final_block = DisFinalBlock(self.feature_size, use_eql=self.use_eql)

    # create a module list of the other required general convolution blocks
    self.layers = ModuleList([])  # initialize to empty list

    # create the fromRGB layers for various inputs:
    if self.use_eql:
        from pro_gan_pytorch.CustomLayers import _equalized_conv2d
        self.fromRGB = lambda out_channels: \
            _equalized_conv2d(3, out_channels, (1, 1), bias=True)
    else:
        from torch.nn import Conv2d
        self.fromRGB = lambda out_channels: Conv2d(3, out_channels, (1, 1), bias=True)

    self.rgb_to_features = ModuleList([self.fromRGB(self.feature_size)])

    # create the remaining layers
    for i in range(self.height - 1):
        if i > 2:
            layer = DisGeneralConvBlock(
                int(self.feature_size // np.power(2, i - 2)),
                int(self.feature_size // np.power(2, i - 3)),
                use_eql=self.use_eql)
            rgb = self.fromRGB(int(self.feature_size // np.power(2, i - 2)))
        else:
            layer = DisGeneralConvBlock(self.feature_size, self.feature_size,
                                        use_eql=self.use_eql)
            rgb = self.fromRGB(self.feature_size)

        self.layers.append(layer)
        self.rgb_to_features.append(rgb)

    # register the temporary downSampler
    self.temporaryDownsampler = AvgPool2d(2)
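A matching sketch under the same assumptions (height=7, feature_size=512) shows that the discriminator blocks mirror the generator's: the deeper blocks start narrow and widen back toward feature_size.

import numpy as np

def discriminator_channel_plan(height=7, feature_size=512):
    # Hypothetical helper: recomputes the (in_channels, out_channels)
    # pairs that the discriminator constructor assigns to each block.
    plan = []
    for i in range(height - 1):
        if i > 2:
            in_ch = int(feature_size // np.power(2, i - 2))
            out_ch = int(feature_size // np.power(2, i - 3))
        else:
            in_ch, out_ch = feature_size, feature_size
        plan.append((in_ch, out_ch))
    return plan

# discriminator_channel_plan() ->
# [(512, 512), (512, 512), (512, 512), (256, 512), (128, 256), (64, 128)]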
def from_rgb(out_channels):
    # maps a 1-channel image to out_channels feature maps via a 1x1 convolution
    return _equalized_conv2d(1, out_channels, (1, 1), bias=True)

def to_rgb(in_channels):
    # maps in_channels feature maps back to a 1-channel image via a 1x1 convolution
    return _equalized_conv2d(in_channels, 1, (1, 1), bias=True)
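These single-channel converters are a drop-in alternative to the toRGB/fromRGB lambdas above when the data has one channel (e.g. grayscale images). A quick shape check, assuming _equalized_conv2d modules follow the usual Conv2d calling convention:

import torch

conv_in = from_rgb(out_channels=512)   # 1 -> 512 feature maps, 1x1 kernel
conv_out = to_rgb(in_channels=512)     # 512 -> 1 channel, 1x1 kernel

x = torch.randn(4, 1, 32, 32)          # batch of single-channel images
features = conv_in(x)                  # expected shape: (4, 512, 32, 32)
image = conv_out(features)             # expected shape: (4, 1, 32, 32)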