    def __init__(self,
                 input_channels,
                 output_channels,
                 downsample=True,
                 local_response_norm=False,
                 progan_var_input=False,
                 last_layer=False):
        super().__init__()
        self.input_channels = input_channels
        self.output_channels = output_channels
        self.progan_var_input = progan_var_input
        self.last_layer = last_layer

        conv_1_input_channels = input_channels + (1 if progan_var_input else 0)
        # Per the ProGAN paper appendix, the hidden conv keeps the same number of channels as the input; only the last block maps directly to output_channels
        conv_1_output_channels = output_channels if self.last_layer else input_channels
        self.conv_1 = Conv2dNormalizedLR(conv_1_input_channels,
                                         conv_1_output_channels,
                                         kernel_size=3,
                                         padding=1)
        if not self.last_layer:
            self.conv_2 = Conv2dNormalizedLR(input_channels,
                                             output_channels,
                                             kernel_size=3,
                                             padding=1)
        if self.input_channels != self.output_channels:
            self.conv_res = Conv2dNormalizedLR(input_channels,
                                               output_channels,
                                               kernel_size=1)
        self.conv_rgb = Conv2dNormalizedLR(3, input_channels, kernel_size=1)
        self.downsample = downsample
        self.lrn = local_response_norm
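
# A minimal sketch (an assumption, not code from this repository) of the standard
# ProGAN minibatch-stddev trick that explains the "+ 1" input channel of conv_1 when
# progan_var_input is set: the batch-wide feature standard deviation is averaged to a
# single scalar and appended to the input as one extra, constant feature map.
import torch

def append_minibatch_stddev(x):
    n, _, h, w = x.shape
    std = x.std(dim=0, unbiased=False).mean()              # one scalar for the whole batch
    return torch.cat([x, std.expand(n, 1, h, w)], dim=1)   # (n, c + 1, h, w)
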
    def __init__(self, in_size, out_size, w_size):
        super().__init__()
        self.conv1 = Conv2dNormalizedLR(in_size, out_size, 3, padding=1)
        self.conv2 = Conv2dNormalizedLR(out_size, out_size, 3, padding=1)
        self.rgb = Conv2dNormalizedLR(3, in_size, 1)

        self.aff1 = Conv2dNormalizedLR(out_size*2, w_size, kernel_size=1, gain=1.0)
        self.aff2 = Conv2dNormalizedLR(out_size*2, w_size, kernel_size=1, gain=1.0)
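
# One possible reading (an assumption; this snippet alone does not show the forward
# pass): aff1/aff2 take the per-channel mean and std of a conv output, concatenated
# into out_size*2 values, and project them to the w_size latent, as in ALAE-style
# encoders. A hedged helper for computing such statistics:
import torch

def channel_stats(x):
    mu = x.mean(dim=(2, 3), keepdim=True)        # (n, c, 1, 1)
    sigma = x.std(dim=(2, 3), keepdim=True)      # (n, c, 1, 1)
    return torch.cat([mu, sigma], dim=1)         # (n, 2*c, 1, 1), matching out_size*2
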
    def __init__(self,
                 input_channels,
                 output_channels,
                 upsample=True,
                 local_response_norm=True,
                 weight_norm=False):
        super().__init__()
        self.input_channels = input_channels
        self.output_channels = output_channels
        self.weight_norm = weight_norm

        self.conv_1 = Conv2dTransposeNormalizedLR(input_channels,
                                                  output_channels,
                                                  kernel_size=3,
                                                  padding=1,
                                                  weight_norm=self.weight_norm)
        self.conv_2 = Conv2dTransposeNormalizedLR(output_channels,
                                                  output_channels,
                                                  kernel_size=3,
                                                  padding=1,
                                                  weight_norm=self.weight_norm)
        # Weight Norm is always disabled here because we don't want to normalize the RGB output
        self.conv_rgb = Conv2dNormalizedLR(output_channels,
                                           3,
                                           kernel_size=1,
                                           weight_norm=False)
        self.upsample = upsample
        self.lrn = local_response_norm
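
# A hedged sketch of what the local_response_norm flag usually toggles in ProGAN-style
# blocks: pixelwise feature normalization ("PixelNorm"). With the (assumed) parameters
# below, F.local_response_norm divides each pixel's feature vector by its RMS over
# channels, which is exactly PixelNorm.
import torch.nn.functional as F

def pixel_norm_via_lrn(x):
    c = x.shape[1]
    # size=2*c covers every channel; alpha=2.0 makes alpha/size == 1/c; beta=0.5 is the sqrt
    return F.local_response_norm(x, size=2 * c, alpha=2.0, beta=0.5, k=1e-8)
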
Example #4
    def __init__(self, in_size, out_size, w_size, is_start=False):
        super().__init__()
        self.is_start = is_start
        self.out_size = out_size

        self.conv1 = Conv2dTransposeNormalizedLRStyleGAN2(in_size,
                                                          out_size,
                                                          3,
                                                          padding=1,
                                                          bias=False)
        self.bias1 = torch.nn.Parameter(torch.zeros((1, out_size, 1, 1)))

        if is_start:
            self.start = torch.nn.Parameter(torch.ones((1, out_size, 4, 4)),
                                            requires_grad=True)

        self.conv2 = Conv2dTransposeNormalizedLRStyleGAN2(out_size,
                                                          out_size,
                                                          3,
                                                          padding=1,
                                                          bias=False)
        self.bias2 = torch.nn.Parameter(torch.zeros((1, out_size, 1, 1)))

        self.Aaff1 = Conv2dNormalizedLR(w_size,
                                        in_size,
                                        kernel_size=1,
                                        gain=1.0)
        self.Baff1 = Conv2dNormalizedLR(1,
                                        out_size,
                                        kernel_size=1,
                                        gain=1.0,
                                        bias=False)

        self.Aaff2 = Conv2dNormalizedLR(w_size,
                                        out_size,
                                        kernel_size=1,
                                        gain=1.0)

        self.Baff2 = Conv2dNormalizedLR(1,
                                        out_size,
                                        kernel_size=1,
                                        gain=1.0,
                                        bias=False)

        self.rgb = Conv2dNormalizedLR(out_size, 3, 1, gain=0.03)
        self.reset_parameters()
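
# A hedged sketch (assumptions throughout, not the repository's actual forward pass) of
# how the pieces above are typically combined in a StyleGAN2-style synthesis step: the
# Aaff branch turns the latent w into per-channel scales (here applied to activations, a
# simplification of weight modulation), and the Baff branch scales a single-channel
# noise map to one learned magnitude per feature map.
import torch
import torch.nn.functional as F

def stylegan2_like_step(x, w, conv, bias, a_aff, b_aff):
    style = a_aff(w.view(w.size(0), -1, 1, 1))                 # (n, c_in, 1, 1)
    x = conv(x * style)                                        # modulate, then convolve
    noise = torch.randn(x.size(0), 1, x.size(2), x.size(3), device=x.device)
    return F.leaky_relu(x + b_aff(noise) + bias, 0.2)          # noise injection + bias
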
Example #5
    def __init__(self,
                 latent_size,
                 n_upscales,
                 output_h_size,
                 local_response_norm=True,
                 scaling_factor=2,
                 hypersphere_latent=True,
                 max_h_size: int = int(1e10),
                 weight_norm=False):
        super().__init__()
        self.n_upscales = n_upscales
        self.output_h_size = output_h_size
        self.scaling_factor = scaling_factor
        self.weight_norm = weight_norm
        self.initial_size = min(
            int(output_h_size * self.scaling_factor**(n_upscales)), max_h_size)
        self.lrn = local_response_norm
        self.hypersphere_latent = hypersphere_latent

        self.inp_layer = LinearNormalizedLR(latent_size,
                                            self.initial_size * 4 * 4,
                                            weight_norm=self.weight_norm)
        self.init_layer = Conv2dTransposeNormalizedLR(
            self.initial_size,
            self.initial_size,
            kernel_size=3,
            padding=1,
            weight_norm=self.weight_norm)
        self.init_rgb = Conv2dNormalizedLR(self.initial_size,
                                           3,
                                           kernel_size=1,
                                           weight_norm=self.weight_norm)

        self.layer_list = []
        for i in range(n_upscales):
            inp_channels = min(
                int(output_h_size * self.scaling_factor**(n_upscales - i)),
                max_h_size)
            outp_channels = min(
                int(output_h_size * self.scaling_factor**(n_upscales - i - 1)),
                max_h_size)
            self.layer_list.append(
                ProGANUpBlock(inp_channels,
                              outp_channels,
                              local_response_norm=local_response_norm,
                              weight_norm=self.weight_norm))
        self.layers = torch.nn.ModuleList(self.layer_list)
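
# Worked example of the channel schedule built above (an illustration only): with
# output_h_size=64, scaling_factor=2, n_upscales=3 and a large max_h_size,
#   initial_size = 64 * 2**3 = 512   -> inp_layer produces a 512 x 4 x 4 tensor
#   upscale 0: 512 -> 256
#   upscale 1: 256 -> 128
#   upscale 2: 128 ->  64
# Assuming each ProGANUpBlock doubles the resolution, the output is 4 * 2**n_upscales
# pixels wide, and the widest layers sit at the 4x4 stage.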