def __init__(self, n_mlp=8, embedding_vocab_size=1, rendered_flame_ascondition=False,
             normal_maps_as_cond=False, core_tensor_res=4, w_truncation_factor=1.0,
             apply_sqrt2_fac_in_eq_lin=False):
    """Build the styled generator: a z->w mapping MLP plus the synthesis network.

    Args:
        n_mlp: number of linear layers in the z->w mapping network.
        embedding_vocab_size: when > 1, an image-id embedding table is created.
        rendered_flame_ascondition: if True, reserves 3 extra noise-input
            channels for the rendered FLAME condition.
        normal_maps_as_cond: if True, reserves 3 extra noise-input channels
            for normal maps.
        core_tensor_res: spatial resolution of the constant core tensor,
            forwarded to the synthesis Generator.
        w_truncation_factor: truncation factor applied in w space.
        apply_sqrt2_fac_in_eq_lin: forwarded to the equalized-linear layers.
    """
    super().__init__()
    # 3 channels per active conditioning signal (False contributes 0).
    noise_in_dims = int(rendered_flame_ascondition * 3 + normal_maps_as_cond * 3)
    with graph_writer.ModuleSpace('StyledGenerator'):
        self.core_tensor_res = core_tensor_res
        self.rendered_flame_ascondition = rendered_flame_ascondition
        self.normal_maps_as_cond = normal_maps_as_cond
        self.w_truncation_factor = w_truncation_factor
        self.mean_w = None  # presumably filled in later for w-truncation — TODO confirm
        code_dim = 512
        self.generator = graph_writer.CallWrapper(Generator(
            code_dim, core_tensor_res=core_tensor_res, noise_in_dims=noise_in_dims,
            apply_sqrt2_fac_in_eq_lin=apply_sqrt2_fac_in_eq_lin))
        # NOTE(review): the three attributes below only exist when
        # embedding_vocab_size > 1; any reader of self.image_embedding /
        # self.img_embdng must guard for that.
        if embedding_vocab_size > 1:
            self.embedding_vocab_size = embedding_vocab_size
            self.image_embedding = ImgEmbedding(vector_size=code_dim,
                                                vocab_size=self.embedding_vocab_size)
            self.img_embdng = graph_writer.CallWrapper(self.image_embedding)
        style_lin_layers = get_w_frm_z(n_mlp, style_dim=code_dim, lr_mlp=0.01,
                                       scale_weight=1.0)
        self.z_to_w = graph_writer.CallWrapper(style_lin_layers,
                                               node_tracing_name='Style_Transfom')
        # Idiomatic parameter count (replaces a manual accumulation loop that
        # reused the misleading copy-pasted name 'discrim_params').
        transform_params = sum(np.prod(p.shape) for p in self.z_to_w.parameters())
        print(f'generator z_to_w n_params: {transform_params}')
def __init__(self, code_dim, core_tensor_res=4, channel_multiplier=2,
             noise_in_dims=None, apply_sqrt2_fac_in_eq_lin=False):
    """Synthesis network: constant input plus progressive styled-conv/to-RGB pairs.

    Args:
        code_dim: dimensionality of the style code (must be 512).
        core_tensor_res: resolution of the learned constant input (must be < 64).
        channel_multiplier: widens the higher-resolution layers.
        noise_in_dims: extra channels of spatial noise/condition input
            forwarded to every styled conv block.
        apply_sqrt2_fac_in_eq_lin: forwarded to the equalized-linear layers.
    """
    super().__init__()
    # changes the architecture too much otherwise
    assert (core_tensor_res < 64)
    assert (code_dim == 512)
    # ex_cha_mult = 2  # Set to 1 to restore original StyleGAN code
    ex_cha_mult = 1  # Set to 1 to restore original StyleGAN code
    self.start_step = int(np.log2(core_tensor_res)) - 2
    with graph_writer.ModuleSpace('Generator'):
        self.const_input = ConstantInput(512, size=core_tensor_res)
        blur_kernel = [1, 3, 3, 1]
        # Output width of each styled conv block, from 4x4 upward. Since
        # code_dim == 512 is asserted above, the first block's input channels
        # (code_dim) and the first ToRGB's input (code_dim * ex_cha_mult)
        # coincide with these widths, so one table drives both module lists.
        widths = [
            512 * ex_cha_mult,  # 4X4
            512 * ex_cha_mult,
            512 * ex_cha_mult,
            512 * ex_cha_mult,
            256 * channel_multiplier * ex_cha_mult,
            128 * channel_multiplier * ex_cha_mult,
            64 * channel_multiplier * ex_cha_mult,
            32 * channel_multiplier * ex_cha_mult,
            16 * channel_multiplier * ex_cha_mult,
        ]
        convs, rgbs = [], []
        in_ch = code_dim
        for i, out_ch in enumerate(widths):
            # Only the first (4x4) block passes one_conv_block=True; later
            # blocks rely on the class default, exactly as the original
            # hand-unrolled list did.
            conv_extra = {'one_conv_block': True} if i == 0 else {}
            convs.append(graph_writer.CallWrapper(StyledConvStyleGAN2(
                in_ch, out_ch, 3, blur_kernel=blur_kernel,
                noise_in_dims=noise_in_dims,
                apply_sqrt2_fac_in_eq_lin=apply_sqrt2_fac_in_eq_lin,
                **conv_extra)))
            # Only the first ToRGB disables upsampling; the rest use the
            # class default.
            rgb_extra = {'upsample': False} if i == 0 else {}
            rgbs.append(graph_writer.CallWrapper(ToRGB(
                out_ch, code_dim,
                apply_sqrt2_fac_in_eq_lin=apply_sqrt2_fac_in_eq_lin,
                **rgb_extra)))
            in_ch = out_ch
        self.progression = nn.ModuleList(convs)
        self.to_rgb = nn.ModuleList(rgbs)

        def _count(module):
            # Total element count over all of a module's parameters.
            return sum(np.prod(p.shape) for p in module.parameters())

        print(f'generator const_input n_params: {_count(self.const_input)}')
        print(f'generator to_rgb n_params: {_count(self.to_rgb)}')
        print(f'generator progression n_params: {_count(self.progression)}')
def __init__(self, size, style_dim, n_mlp, channel_multiplier=2,
             blur_kernel=[1, 3, 3, 1], lr_mlp=0.01):
    """StyleGAN2 generator: z->w mapping, constant input, and styled conv stack.

    Args:
        size: output image resolution (power of two).
        style_dim: dimensionality of the style vector w.
        n_mlp: number of layers in the z->w mapping network.
        channel_multiplier: widens the high-resolution layers.
        blur_kernel: FIR kernel for the up/downsampling blur.
            NOTE(review): mutable default argument; safe only as long as no
            callee mutates it — confirm before refactoring the default.
        lr_mlp: learning-rate multiplier for the mapping network.
    """
    super().__init__()
    self.size = size
    self.style_dim = style_dim
    with graph_writer.ModuleSpace('Generator'):
        self.style = get_w_frm_z(n_mlp, style_dim, lr_mlp)
        # Channel widths keyed by spatial resolution.
        self.channels = {
            4: 512,
            8: 512,
            16: 512,
            32: 512,
            64: 256 * channel_multiplier,
            128: 128 * channel_multiplier,
            256: 64 * channel_multiplier,
            512: 32 * channel_multiplier,
            1024: 16 * channel_multiplier,
        }
        self.input = graph_writer.CallWrapper(ConstantInput(self.channels[4]))
        self.conv1 = graph_writer.CallWrapper(StyledConv(
            self.channels[4], self.channels[4], 3, style_dim,
            blur_kernel=blur_kernel, noise_in_dims=1))
        self.to_rgb1 = graph_writer.CallWrapper(
            ToRGB(self.channels[4], style_dim, upsample=False))
        self.log_size = int(math.log(size, 2))
        self.num_layers = (self.log_size - 2) * 2 + 1
        self.convs = nn.ModuleList()
        self.upsamples = nn.ModuleList()
        self.to_rgbs = nn.ModuleList()
        self.noises = nn.Module()
        in_channel = self.channels[4]
        # Fixed per-layer noise buffers; spatial resolution doubles every
        # two layers (res = (idx + 5) // 2 in log2 units).
        for layer_idx in range(self.num_layers):
            res = (layer_idx + 5) // 2
            shape = [1, 1, 2**res, 2**res]
            self.noises.register_buffer(f'noise_{layer_idx}',
                                        torch.randn(*shape))
        # One upsampling conv + one plain conv + one ToRGB per resolution
        # step from 8x8 up to the target size.
        for i in range(3, self.log_size + 1):
            out_channel = self.channels[2**i]
            self.convs.append(graph_writer.CallWrapper(StyledConv(
                in_channel, out_channel, 3, style_dim, upsample=True,
                blur_kernel=blur_kernel, noise_in_dims=1)))
            self.convs.append(graph_writer.CallWrapper(StyledConv(
                out_channel, out_channel, 3, style_dim,
                blur_kernel=blur_kernel, noise_in_dims=1)))
            self.to_rgbs.append(
                graph_writer.CallWrapper(ToRGB(out_channel, style_dim)))
            in_channel = out_channel
        self.n_latent = self.log_size * 2 - 2

        def _count(module):
            # Total element count over a module's *parameters*. Buffers are
            # excluded, so the "static noises" count below is always 0 —
            # presumably intentional, since noise buffers are not trainable.
            return sum(np.prod(p.shape) for p in module.parameters())

        print(f'generator static noises n_params: {_count(self.noises)}')
        print(f'generator upsamples n_params: {_count(self.upsamples)}')
        print(f'generator to_rgb_params n_params: '
              f'{_count(self.to_rgb1) + _count(self.to_rgbs)}')
        print(f'generator conv_params_cnt n_params: '
              f'{_count(self.conv1) + _count(self.convs)}')