Example No. 1
    def __init__(self, inputs, outputs, last=False, fused_scale=True):
        super(DiscriminatorBlock, self).__init__()
        # the final block receives one extra input channel
        self.conv_1 = ln.Conv2d(inputs + (1 if last else 0),
                                inputs,
                                3,
                                1,
                                1,
                                bias=False)
        self.bias_1 = nn.Parameter(torch.Tensor(1, inputs, 1, 1))
        self.blur = Blur(inputs)
        self.last = last
        self.fused_scale = fused_scale
        if last:
            # final 4x4 block: flatten the feature map and project to `outputs`
            self.dense = ln.Linear(inputs * 4 * 4, outputs)
        else:
            if fused_scale:
                # stride-2 conv folds the 2x downsample into the convolution itself
                self.conv_2 = ln.Conv2d(inputs,
                                        outputs,
                                        3,
                                        2,
                                        1,
                                        bias=False,
                                        transform_kernel=True)
            else:
                self.conv_2 = ln.Conv2d(inputs, outputs, 3, 1, 1, bias=False)

        self.bias_2 = nn.Parameter(torch.Tensor(1, outputs, 1, 1))

        with torch.no_grad():
            self.bias_1.zero_()
            self.bias_2.zero_()
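The forward pass is not part of this example. For orientation, here is a minimal plain-PyTorch sketch of how such a block is typically wired; it swaps nn.Conv2d in for ln.Conv2d and drops the Blur filter and the extra-channel handling, so it is an approximation rather than the original module:

import torch
import torch.nn as nn
import torch.nn.functional as F

class SimpleDiscriminatorBlock(nn.Module):
    # stand-in sketch: nn.Conv2d replaces ln.Conv2d, blur omitted
    def __init__(self, inputs, outputs, last=False, fused_scale=True):
        super().__init__()
        self.last = last
        self.fused_scale = fused_scale
        self.conv_1 = nn.Conv2d(inputs + (1 if last else 0), inputs, 3, 1, 1, bias=False)
        self.bias_1 = nn.Parameter(torch.zeros(1, inputs, 1, 1))
        if last:
            self.dense = nn.Linear(inputs * 4 * 4, outputs)
        else:
            self.conv_2 = nn.Conv2d(inputs, outputs, 3, 2 if fused_scale else 1, 1, bias=False)
        self.bias_2 = nn.Parameter(torch.zeros(1, outputs, 1, 1))

    def forward(self, x):
        x = F.leaky_relu(self.conv_1(x) + self.bias_1, 0.2)
        if self.last:
            return self.dense(x.flatten(1))
        x = self.conv_2(x)
        if not self.fused_scale:
            x = F.avg_pool2d(x, 2)  # explicit downsample when it is not fused into the conv
        return F.leaky_relu(x + self.bias_2, 0.2)

block = SimpleDiscriminatorBlock(16, 32)
print(block(torch.randn(2, 16, 32, 32)).shape)  # torch.Size([2, 32, 16, 16])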
Example No. 2
    def __init__(self, inputs, outputs, latent_size, has_last_conv=True, fused_scale=True):  # use fused_scale for resolutions above 128, i.e. the strided conv performs the rescaling in one step
        super().__init__()
        self.has_last_conv = has_last_conv
        self.noise_weight_1 = nn.Parameter(torch.Tensor(1, inputs, 1, 1))
        self.noise_weight_1.data.zero_()
        self.bias_1 = nn.Parameter(torch.Tensor(1, inputs, 1, 1))
        self.instance_norm_1 = nn.InstanceNorm2d(inputs, affine=False, eps=1e-8)
        self.inver_mod1 = ln.Linear(2 * inputs, latent_size, gain=1) # [n, 2c] -> [n,512]
        self.conv_1 = ln.Conv2d(inputs, inputs, 3, 1, 1, bias=False)

        self.noise_weight_2 = nn.Parameter(torch.Tensor(1, inputs, 1, 1))
        self.noise_weight_2.data.zero_()
        self.bias_2 = nn.Parameter(torch.Tensor(1, inputs, 1, 1))
        self.instance_norm_2 = nn.InstanceNorm2d(inputs, affine=False, eps=1e-8)
        self.inver_mod2 = ln.Linear(2 * inputs, latent_size, gain=1)
        self.blur = Blur(inputs)
        if has_last_conv:
            if fused_scale:
                # stride-2 conv folds the 2x downsample into the convolution itself
                self.conv_2 = ln.Conv2d(inputs, outputs, 3, 2, 1, bias=False, transform_kernel=True)
            else:
                self.conv_2 = ln.Conv2d(inputs, outputs, 3, 1, 1, bias=False)
        self.fused_scale = fused_scale
        
        self.inputs = inputs
        self.outputs = outputs

        if self.inputs != self.outputs:
            # 1x1 conv to project to `outputs` channels when the channel count changes
            self.conv_3 = ln.Conv2d(inputs, outputs, 1, 1, 0)

        with torch.no_grad():
            self.bias_1.zero_()
            self.bias_2.zero_()
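Both `inver_mod` linears expect a `[n, 2 * inputs]` input, which suggests that per-channel statistics of the feature map (e.g. mean and standard deviation over the spatial dimensions) are concatenated and mapped back into the latent space. A small self-contained sketch under that assumption, with nn.Linear standing in for ln.Linear:

import torch
import torch.nn as nn

def channel_stats(x):
    # per-channel mean and std over spatial dims -> [n, 2c], the shape inver_mod1 expects
    return torch.cat([x.mean(dim=(2, 3)), x.std(dim=(2, 3))], dim=1)

n, c, latent_size = 2, 8, 16
x = torch.randn(n, c, 32, 32)
inver_mod = nn.Linear(2 * c, latent_size)  # stand-in for ln.Linear(2 * inputs, latent_size, gain=1)
w = inver_mod(channel_stats(x))
print(w.shape)  # torch.Size([2, 16])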
Example No. 3
    def __init__(self,
                 inputs,
                 outputs,
                 latent_size,
                 has_first_conv=True,
                 fused_scale=True):
        super(DecodeBlock, self).__init__()
        self.has_first_conv = has_first_conv
        self.inputs = inputs
        self.fused_scale = fused_scale
        if has_first_conv:
            if fused_scale:
                # stride-2 transposed conv folds the 2x upsample into the convolution itself
                self.conv_1 = ln.ConvTranspose2d(inputs,
                                                 outputs,
                                                 3,
                                                 2,
                                                 1,
                                                 bias=False,
                                                 transform_kernel=True)
            else:
                self.conv_1 = ln.Conv2d(inputs, outputs, 3, 1, 1, bias=False)

        self.blur = Blur(outputs)
        self.noise_weight_1 = nn.Parameter(torch.Tensor(1, outputs, 1, 1))
        self.noise_weight_1.data.zero_()
        self.bias_1 = nn.Parameter(torch.Tensor(1, outputs, 1, 1))
        self.instance_norm_1 = nn.InstanceNorm2d(outputs,
                                                 affine=False,
                                                 eps=1e-8)
        self.style_1 = ln.Linear(latent_size, 2 * outputs, gain=1)

        self.conv_2 = ln.Conv2d(outputs, outputs, 3, 1, 1, bias=False)
        self.noise_weight_2 = nn.Parameter(torch.Tensor(1, outputs, 1, 1))
        self.noise_weight_2.data.zero_()
        self.bias_2 = nn.Parameter(torch.Tensor(1, outputs, 1, 1))
        self.instance_norm_2 = nn.InstanceNorm2d(outputs,
                                                 affine=False,
                                                 eps=1e-8)
        self.style_2 = ln.Linear(latent_size, 2 * outputs, gain=1)

        with torch.no_grad():
            self.bias_1.zero_()
            self.bias_2.zero_()
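`style_1` and `style_2` map the latent to `2 * outputs` values, the usual StyleGAN-style per-channel scale and bias applied after instance normalization, while the zero-initialized `noise_weight` parameters gate per-pixel noise. The forward pass is not shown in this example; the following is a self-contained sketch of what that modulation step typically looks like:

import torch
import torch.nn as nn

n, c, latent_size = 2, 8, 16
x = torch.randn(n, c, 4, 4)

noise_weight = nn.Parameter(torch.zeros(1, c, 1, 1))  # like noise_weight_1: starts at zero
x = x + noise_weight * torch.randn(n, 1, 4, 4)        # per-pixel noise with a learned per-channel gain

norm = nn.InstanceNorm2d(c, affine=False, eps=1e-8)
style = nn.Linear(latent_size, 2 * c)                 # stand-in for ln.Linear(latent_size, 2 * outputs, gain=1)
s = style(torch.randn(n, latent_size)).view(n, 2, c, 1, 1)
scale, bias = s[:, 0] + 1.0, s[:, 1]                  # split into per-channel scale and bias
out = norm(x) * scale + bias
print(out.shape)  # torch.Size([2, 8, 4, 4])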
Example No. 4
    def __init__(self, inputs, channels):
        super(ToRGB, self).__init__()
        self.inputs = inputs
        self.channels = channels
        # 1x1 conv: feature maps -> image channels
        self.to_rgb = ln.Conv2d(inputs, channels, 1, 1, 0, gain=1)
Example No. 5
    def __init__(self, channels, outputs):
        super(FromRGB, self).__init__()
        # 1x1 conv: image channels -> feature maps
        self.from_rgb = ln.Conv2d(channels, outputs, 1, 1, 0)
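ToRGB and FromRGB are both 1x1 convolutions that translate between image channels and feature channels at a given resolution. A quick shape check with plain nn.Conv2d stand-ins for ln.Conv2d:

import torch
import torch.nn as nn

to_rgb = nn.Conv2d(512, 3, 1, 1, 0)    # ToRGB: features -> image channels
from_rgb = nn.Conv2d(3, 512, 1, 1, 0)  # FromRGB: image channels -> features
img = to_rgb(torch.randn(1, 512, 4, 4))
feat = from_rgb(img)
print(img.shape, feat.shape)  # torch.Size([1, 3, 4, 4]) torch.Size([1, 512, 4, 4])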