Exemplo n.º 1
0
Arquivo: net.py Projeto: jhejna/ul_gen
    def __init__(self, inputs, outputs, latent_size, last=False, fused_scale=True):
        """Encoder block: styled conv pair with separate per-channel biases.

        The final block (``last=True``) replaces the second conv with a dense
        layer over the flattened 4x4 feature map.
        """
        super(EncodeBlock, self).__init__()
        # Channel-preserving 3x3 conv; bias kept as a separate parameter so it
        # can be applied after normalization.
        self.conv_1 = ln.Conv2d(inputs, inputs, 3, 1, 1, bias=False)
        self.bias_1 = nn.Parameter(torch.Tensor(1, inputs, 1, 1))
        self.instance_norm_1 = nn.InstanceNorm2d(inputs, affine=False)
        self.blur = Blur(inputs)
        self.last = last
        self.fused_scale = fused_scale
        if last:
            # Final block: flatten the 4x4 spatial map into a dense projection.
            self.dense = ln.Linear(inputs * 4 * 4, outputs)
        elif fused_scale:
            # Stride-2 conv fuses the 2x downscale into the convolution.
            self.conv_2 = ln.Conv2d(inputs, outputs, 3, 2, 1, bias=False, transform_kernel=True)
        else:
            self.conv_2 = ln.Conv2d(inputs, outputs, 3, 1, 1, bias=False)

        self.bias_2 = nn.Parameter(torch.Tensor(1, outputs, 1, 1))
        self.instance_norm_2 = nn.InstanceNorm2d(outputs, affine=False)
        self.style_1 = ln.Linear(2 * inputs, latent_size)
        # The last block emits a flat vector, so style_2 sees only `outputs`
        # features instead of the usual mean/std pair (2 * outputs).
        self.style_2 = ln.Linear(outputs if last else 2 * outputs, latent_size)

        with torch.no_grad():
            self.bias_1.zero_()
            self.bias_2.zero_()
Exemplo n.º 2
0
Arquivo: net.py Projeto: jhejna/ul_gen
    def __init__(self, inputs, outputs, latent_size, has_first_conv=True, fused_scale=True, layer=0):
        """Decoder block: optional upscaling conv followed by a styled conv.

        Fixes: the original assigned ``self.has_first_conv`` twice, and zeroed
        the noise weights via ``.data`` outside the ``torch.no_grad()`` block
        that zeroes the biases; both initializations now live together.

        Args:
            inputs: input channel count.
            outputs: output channel count.
            latent_size: width of the style latent feeding style_1/style_2.
            has_first_conv: whether this block starts with conv_1.
            fused_scale: fuse the 2x upscale into a transposed conv.
            layer: index of this block within the decoder stack.
        """
        super(DecodeBlock, self).__init__()
        self.has_first_conv = has_first_conv
        self.inputs = inputs
        self.fused_scale = fused_scale
        if has_first_conv:
            if fused_scale:
                # Transposed stride-2 conv fuses the 2x upscale into the conv.
                self.conv_1 = ln.ConvTranspose2d(inputs, outputs, 3, 2, 1, bias=False, transform_kernel=True)
            else:
                self.conv_1 = ln.Conv2d(inputs, outputs, 3, 1, 1, bias=False)

        self.blur = Blur(outputs)
        self.noise_weight_1 = nn.Parameter(torch.Tensor(1, outputs, 1, 1))
        self.bias_1 = nn.Parameter(torch.Tensor(1, outputs, 1, 1))
        self.instance_norm_1 = nn.InstanceNorm2d(outputs, affine=False, eps=1e-8)
        self.style_1 = ln.Linear(latent_size, 2 * outputs, gain=1)

        self.conv_2 = ln.Conv2d(outputs, outputs, 3, 1, 1, bias=False)
        self.noise_weight_2 = nn.Parameter(torch.Tensor(1, outputs, 1, 1))
        self.bias_2 = nn.Parameter(torch.Tensor(1, outputs, 1, 1))
        self.instance_norm_2 = nn.InstanceNorm2d(outputs, affine=False, eps=1e-8)
        self.style_2 = ln.Linear(latent_size, 2 * outputs, gain=1)

        self.layer = layer

        # Noise contributions and biases start at zero.
        with torch.no_grad():
            self.noise_weight_1.zero_()
            self.noise_weight_2.zero_()
            self.bias_1.zero_()
            self.bias_2.zero_()
Exemplo n.º 3
0
Arquivo: net.py Projeto: jhejna/ul_gen
    def __init__(self, latent_size, channels=3, image_size=64):
        """MLP encoder: flattened image -> 1024 -> 1024 -> latent."""
        super(EncoderFC, self).__init__()

        self.channels = channels
        self.latent_size = latent_size
        self.image_size = image_size

        hidden = 1024
        # NOTE(review): fc_1 consumes image_size**2 features and ignores
        # `channels` — presumably single-channel input; confirm with callers.
        self.fc_1 = ln.Linear(image_size * image_size, hidden)
        self.fc_2 = ln.Linear(hidden, hidden)
        self.fc_3 = ln.Linear(hidden, latent_size)
Exemplo n.º 4
0
 def __init__(self, inputs, output, lrmul):
     """Mapping block holding a forward fc and an inverse-direction i_fc.

     The forward weight is cast to float64 and orthogonalized in place.
     """
     super(MappingBlock, self).__init__()
     with torch.no_grad():
         self.fc = ln.Linear(inputs, output, lrmul=lrmul)
         # Work in float64 so the orthogonalization below is numerically stable.
         self.fc.weight.data = self.fc.weight.data.double()
         # gram_schmidt is a project helper; presumably orthonormalizes the
         # weight rows — TODO confirm its exact contract.
         self.fc.weight.data = gram_schmidt(self.fc.weight.data)
         self.fc.bias.data = self.fc.bias.data.double()
         # Inverse-direction layer. NOTE(review): left in float32 while fc is
         # float64 — confirm this asymmetry is intentional.
         self.i_fc = ln.Linear(output, inputs, lrmul=lrmul)
         self.last_activation = None
         self.alpha = 0.2
    def __init__(self, startf, maxf, layer_count, latent_size, channels=3):
        """MLP encoder for 28x28 inputs: 784 -> 1024 -> 1024 -> latent."""
        super(EncoderFC, self).__init__()
        self.maxf = maxf
        self.startf = startf
        self.layer_count = layer_count
        self.channels = channels
        self.latent_size = latent_size

        hidden = 1024
        # Input resolution is fixed at 28x28 (flattened to 784 features).
        self.fc_1 = ln.Linear(28 * 28, hidden)
        self.fc_2 = ln.Linear(hidden, hidden)
        self.fc_3 = ln.Linear(hidden, latent_size)
Exemplo n.º 6
0
Arquivo: net.py Projeto: jhejna/ul_gen
    def __init__(self, latent_size=128, channels=3, image_size=64):
        """MLP generator: latent -> 1024 -> 1024 -> image_size**2.

        Fixes a NameError: the original read the undefined name
        ``images_size`` instead of the ``image_size`` parameter, so
        construction would crash immediately.
        """
        super(GeneratorFC, self).__init__()

        self.channels = channels
        self.latent_size = latent_size
        self.image_size = image_size  # was: images_size (undefined name)

        self.fc_1 = ln.Linear(latent_size, 1024)
        self.fc_2 = ln.Linear(1024, 1024)
        self.fc_3 = ln.Linear(1024, image_size * image_size)

        # NOTE(review): resolution table is hard-coded to 28 even though
        # image_size defaults to 64 — looks like an MNIST leftover; confirm
        # with callers before generalizing.
        self.layer_to_resolution = [28] * 10
Exemplo n.º 7
0
 def __init__(self, inputs, output, lrmul):
     """Mapping block variant: forward fc blended toward an orthogonal basis.

     Unlike the full-orthogonalization variant, the weight is only nudged
     10% of the way toward its Gram-Schmidt orthogonalization.
     """
     super(MappingBlock, self).__init__()
     with torch.no_grad():
         self.fc = ln.Linear(inputs, output, lrmul=lrmul)
         # Float64 keeps the orthogonalization arithmetic stable.
         self.fc.weight.data = self.fc.weight.data.double()
         # Soft orthogonalization: 90% original weight, 10% Gram-Schmidt
         # result. gram_schmidt is a project helper; presumably
         # orthonormalizes the weight rows — TODO confirm.
         self.fc.weight.data = self.fc.weight.data * 0.9 + gram_schmidt(
             self.fc.weight.data) * 0.1
         self.fc.bias.data = self.fc.bias.data.double()
         # Inverse-direction layer. NOTE(review): stays float32 while fc is
         # float64 — confirm intentional.
         self.i_fc = ln.Linear(output, inputs, lrmul=lrmul)
         self.last_activation = None
         self.alpha = 0.2
Exemplo n.º 8
0
Arquivo: net.py Projeto: wvuvl/GPND2
    def __init__(self, inputs, outputs, has_first_conv=True, layer=0):
        """Decoder block (GPND2): conv or dense entry, then a second conv.

        Fixes: the original assigned ``self.has_first_conv`` twice, and zeroed
        the noise weights via ``.data`` outside the ``torch.no_grad()`` block
        used for the biases; all zero-initializations now live together.

        Args:
            inputs: input channel count (or latent width when dense entry).
            outputs: output channel count.
            has_first_conv: True for conv entry; False expands the latent
                into a 4x4 feature map via a dense layer.
            layer: index of this block within the decoder stack.
        """
        super(DecodeBlock, self).__init__()
        self.has_first_conv = has_first_conv
        self.inputs = inputs
        if has_first_conv:
            self.conv_1 = ln.Conv2d(inputs, outputs, 3, 1, 1, bias=False)
        else:
            # First decoder block: expand the latent into a 4x4 map.
            self.dense = ln.Linear(inputs, outputs * 4 * 4)

        self.blur = Blur(outputs)
        self.noise_weight_1 = nn.Parameter(torch.Tensor(1, outputs, 1, 1))
        self.bias_1 = nn.Parameter(torch.Tensor(1, outputs, 1, 1))

        self.conv_2 = ln.Conv2d(outputs, outputs, 3, 1, 1, bias=False)
        self.noise_weight_2 = nn.Parameter(torch.Tensor(1, outputs, 1, 1))
        self.bias_2 = nn.Parameter(torch.Tensor(1, outputs, 1, 1))

        self.layer = layer

        # Noise contributions and biases start at zero.
        with torch.no_grad():
            self.noise_weight_1.zero_()
            self.noise_weight_2.zero_()
            self.bias_1.zero_()
            self.bias_2.zero_()
    def __init__(self, startf=32, maxf=256, layer_count=3, channels=3):
        """Multi-scale discriminator: a FromRGB adapter plus a
        DiscriminatorBlock per resolution level, ending in a scalar head."""
        super(Discriminator, self).__init__()
        self.maxf = maxf
        self.startf = startf
        self.layer_count = layer_count
        self.from_rgb = nn.ModuleList()
        self.channels = channels

        self.encode_block: nn.ModuleList[DiscriminatorBlock] = nn.ModuleList()

        in_ch = startf
        width_mul = 2
        resolution = 2 ** (self.layer_count + 1)

        for i in range(self.layer_count):
            out_ch = min(self.maxf, startf * width_mul)
            self.from_rgb.append(FromRGB(channels, in_ch))
            # Fused downscaling is only used at high resolutions (>= 128).
            self.encode_block.append(
                DiscriminatorBlock(in_ch,
                                   out_ch,
                                   i == self.layer_count - 1,
                                   fused_scale=resolution >= 128))
            resolution //= 2
            in_ch = out_ch
            width_mul *= 2

        # Scalar real/fake head on the deepest feature vector.
        self.fc2 = ln.Linear(in_ch, 1, gain=1)
Exemplo n.º 10
0
Arquivo: net.py Projeto: jhejna/ul_gen
    def __init__(self, startf, maxf, layer_count, latent_size, channels=3):
        """Convolutional encoder: EncodeBlock per resolution, then a linear head."""
        super(EncoderWithFC, self).__init__()
        self.maxf = maxf
        self.startf = startf
        self.layer_count = layer_count
        self.from_rgb: nn.ModuleList[FromRGB] = nn.ModuleList()
        self.channels = channels
        self.latent_size = latent_size

        self.encode_block: nn.ModuleList[EncodeBlock] = nn.ModuleList()

        in_ch = startf
        width_mul = 2
        resolution = 2 ** (self.layer_count + 1)

        for i in range(self.layer_count):
            out_ch = min(self.maxf, startf * width_mul)
            self.from_rgb.append(FromRGB(channels, in_ch))
            # Fused downscaling is only used at high resolutions (>= 128).
            self.encode_block.append(
                EncodeBlock(in_ch, out_ch, latent_size,
                            i == self.layer_count - 1,
                            fused_scale=resolution >= 128))
            resolution //= 2
            in_ch = out_ch
            width_mul *= 2

        # NOTE(review): head outputs a single value despite latent_size being
        # stored above — confirm this mirrors the discriminator on purpose.
        self.fc2 = ln.Linear(in_ch, 1, gain=1)
    def __init__(self,
                 startf=32,
                 maxf=256,
                 layer_count=3,
                 latent_size=128,
                 channels=3):
        """MLP generator for 28x28 outputs: latent -> 1024 -> 1024 -> 784."""
        super(GeneratorFC, self).__init__()
        self.maxf = maxf
        self.startf = startf
        self.layer_count = layer_count
        self.channels = channels
        self.latent_size = latent_size

        hidden = 1024
        self.fc_1 = ln.Linear(latent_size, hidden)
        self.fc_2 = ln.Linear(hidden, hidden)
        self.fc_3 = ln.Linear(hidden, 28 * 28)

        # Every layer renders at the fixed 28x28 resolution.
        self.layer_to_resolution = [28] * 10
Exemplo n.º 12
0
Arquivo: net.py Projeto: jhejna/ul_gen
 def __init__(self, mapping_layers=5, latent_size=256, dlatent_size=256, mapping_fmaps=256):
     """Stack of plain linear layers mapping latent -> dlatent."""
     super(VAEMappingToLatentNoStyle, self).__init__()
     self.mapping_layers = mapping_layers
     self.map_blocks: nn.ModuleList[MappingBlock] = nn.ModuleList()
     in_features = latent_size
     for i in range(mapping_layers):
         # Hidden layers use mapping_fmaps; the final layer emits dlatent_size.
         out_features = dlatent_size if i == mapping_layers - 1 else mapping_fmaps
         self.map_blocks.append(ln.Linear(in_features, out_features, lrmul=0.1))
         in_features = out_features
Exemplo n.º 13
0
Arquivo: net.py Projeto: wvuvl/GPND2
 def __init__(self,
              mapping_layers=5,
              net_inputs=256,
              hidden_size=256,
              net_outputs=1):
     """Linear-stack discriminator: net_inputs -> hidden_size ... -> net_outputs.

     Fixes an inverted conditional: the original gave the *last* layer
     ``hidden_size`` outputs and every hidden layer ``net_outputs`` (= 1)
     outputs, collapsing the network to width 1 after the first layer.
     The sibling mapping constructor in this file puts the final width on
     the last layer, which is the pattern restored here.
     """
     super(Discriminator, self).__init__()
     inputs = net_inputs
     self.mapping_layers = mapping_layers
     self.map_blocks: nn.ModuleList[MappingBlock] = nn.ModuleList()
     for i in range(mapping_layers):
         # Final layer emits net_outputs; intermediate layers keep hidden_size.
         outputs = net_outputs if i == mapping_layers - 1 else hidden_size
         block = ln.Linear(inputs, outputs, lrmul=0.1)
         inputs = outputs
         self.map_blocks.append(block)
Exemplo n.º 14
0
Arquivo: net.py Projeto: wvuvl/GPND2
    def __init__(self, inputs, outputs, last=False):
        """Encoder block (GPND2): channel-preserving conv, then conv or dense."""
        super(EncodeBlock, self).__init__()
        self.conv_1 = ln.Conv2d(inputs, inputs, 3, 1, 1, bias=False)
        self.bias_1 = nn.Parameter(torch.Tensor(1, inputs, 1, 1))
        self.blur = Blur(inputs)
        self.last = last
        # The final block flattens the 4x4 map into a dense projection;
        # every other block uses a channel-changing 3x3 conv.
        if last:
            self.dense = ln.Linear(inputs * 4 * 4, outputs)
        else:
            self.conv_2 = ln.Conv2d(inputs, outputs, 3, 1, 1, bias=False)

        self.bias_2 = nn.Parameter(torch.Tensor(1, outputs, 1, 1))

        # Per-channel biases start at zero (zeros_ runs under no_grad).
        nn.init.zeros_(self.bias_1)
        nn.init.zeros_(self.bias_2)
Exemplo n.º 15
0
Arquivo: net.py Projeto: jhejna/ul_gen
    def __init__(self, inputs, outputs, last=False, fused_scale=True, dense=False):
        """Discriminator block; optionally ends in a dense projection."""
        super(DiscriminatorBlock, self).__init__()
        # The last block receives one extra input channel; NOTE(review):
        # presumably a minibatch-stddev map appended by the caller — confirm.
        in_ch = inputs + (1 if last else 0)
        self.conv_1 = ln.Conv2d(in_ch, inputs, 3, 1, 1, bias=False)
        self.bias_1 = nn.Parameter(torch.Tensor(1, inputs, 1, 1))
        self.blur = Blur(inputs)
        self.last = last
        self.dense_ = dense
        self.fused_scale = fused_scale
        if self.dense_:
            # Flatten the 4x4 spatial map into a dense projection.
            self.dense = ln.Linear(inputs * 4 * 4, outputs)
        elif fused_scale:
            # Stride-2 conv fuses the 2x downscale into the convolution.
            self.conv_2 = ln.Conv2d(inputs, outputs, 3, 2, 1, bias=False, transform_kernel=True)
        else:
            self.conv_2 = ln.Conv2d(inputs, outputs, 3, 1, 1, bias=False)

        self.bias_2 = nn.Parameter(torch.Tensor(1, outputs, 1, 1))

        with torch.no_grad():
            self.bias_1.zero_()
            self.bias_2.zero_()
Exemplo n.º 16
0
Arquivo: net.py Projeto: vii33/ALAE
    def __init__(self,
                 startf=32,
                 maxf=256,
                 layer_count=3,
                 latent_size=512,
                 channels=3):
        """Style-free encoder: DiscriminatorBlock stack ending in a latent head."""
        super(EncoderNoStyle, self).__init__()
        self.maxf = maxf
        self.startf = startf
        self.layer_count = layer_count
        self.from_rgb = nn.ModuleList()
        self.channels = channels

        self.encode_block: nn.ModuleList[DiscriminatorBlock] = nn.ModuleList()

        in_ch = startf
        width_mul = 2
        resolution = 2 ** (self.layer_count + 1)

        for i in range(self.layer_count):
            out_ch = min(self.maxf, startf * width_mul)
            self.from_rgb.append(FromRGB(channels, in_ch))
            # Only the deepest block uses the dense 4x4 projection; fused
            # downscaling is reserved for high resolutions (>= 128).
            self.encode_block.append(
                DiscriminatorBlock(in_ch,
                                   out_ch,
                                   last=False,
                                   fused_scale=resolution >= 128,
                                   dense=i == self.layer_count - 1))
            resolution //= 2
            in_ch = out_ch
            width_mul *= 2

        # Project the deepest features to the latent vector.
        self.fc2 = ln.Linear(in_ch, latent_size, gain=1)
 def __init__(self, inputs, output, lrmul):
     super(MappingBlock, self).__init__()
     self.fc = ln.Linear(inputs, output, lrmul=lrmul)