from model import StyledGenerator, Discriminator
import torch
import numpy as np

generator = StyledGenerator(flame_dim=159,
                            all_stage_discrim=False,
                            embedding_vocab_size=70_000,
                            rendered_flame_ascondition=False,
                            inst_norm=True,
                            normal_maps_as_cond=True,
                            core_tensor_res=4,
                            use_styled_conv_stylegan2=True,
                            n_mlp=8)

# Deterministically re-initialize the generator weights for testing; the
# commented-out sweep below set selected layers to Gaussian noise and the
# remaining ones to a small constant.
mdl_state = generator.state_dict()
torch.manual_seed(2)

# tot_params = 0
# for name in mdl_state:
#     if name.find('z_to_w') >= 0 or name.find('generator') >= 0 and name.find('embd') < 0 and \
#             name.find('to_rgb.8') < 0 and name.find('to_rgb.7') < 0 and name.find('progression.8') < 0 \
#             and name.find('progression.7') < 0:
#         print(name)
#         mdl_state[name] = mdl_state[name] * 0 + torch.randn(mdl_state[name].shape)
#         tot_params += np.prod(mdl_state[name].shape)
#     else:
#         mdl_state[name] = mdl_state[name] * 0 + 6e-3
#
# print(f'Total set params are: {tot_params}')

tot_params = 0
# Fragment from the progressive-growing training loop; the matching `if`
# branch (the phase-transition case) is elided in this excerpt.
else:
    alpha = 0
    ckpt_step = step
    resolution = 4 * 2 ** step

    # Rebuild the data loader at the new resolution.
    image_loader = SymbolDataset(args.path, transform, resolution).set_attrs(
        batch_size=batch_size.get(resolution, batch_default),
        shuffle=True)
    train_loader = iter(image_loader)

    # Checkpoint the generator, discriminator, and running-average generator.
    jt.save(
        {
            'generator': netG.state_dict(),
            'discriminator': netD.state_dict(),
            'g_running': g_running.state_dict(),
        },
        f'FFHQ/checkpoint/train_step-{ckpt_step}.model',
    )

# Fetch the next real batch, restarting the loader once it is exhausted.
try:
    real_image = next(train_loader)
except (OSError, StopIteration):
    train_loader = iter(image_loader)
    real_image = next(train_loader)

real_image.requires_grad = True
b_size = real_image.size(0)