# netG = Networks.Generator(depth=9, latent_size=512)
# z = z.squeeze(2).squeeze(2)
# x_ = netG(z, depth=8, alpha=1)
# print(z.shape)
# print(x_.shape)

#----------------test pre-model output-----------
def toggle_grad(model, requires_grad):
    for p in model.parameters():
        p.requires_grad_(requires_grad)

netG = torch.nn.DataParallel(
    net.Generator(depth=9, latent_size=512)
)  # in: [-1,512], depth: 0-4, 1-8, 2-16, 3-32, 4-64, 5-128, 6-256, 7-512, 8-1024
netG.load_state_dict(
    torch.load('./pre-model/GAN_GEN_SHADOW_8.pth', map_location=device))  # the shadow generator weights give better results

netD1 = torch.nn.DataParallel(
    net.Discriminator(height=9, feature_size=512)
)  # in: [-1,3,1024,1024], out:[], depth: 0-4, 1-8, 2-16, 3-32, 4-64, 5-128, 6-256, 7-512, 8-1024
netD1.load_state_dict(
    torch.load('./pre-model/GAN_DIS_8.pth', map_location=device))

netD2 = torch.nn.DataParallel(Encoder.encoder_v1(height=9, feature_size=512))
#netD2 = torch.nn.DataParallel(Encoder.encoder_v2())  # new architecture, no parameters needed

toggle_grad(netD1, False)
toggle_grad(netD2, False)
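# A minimal sanity-check sketch for the frozen pretrained generator, mirroring the
# commented-out test above: sample a random latent batch and decode it at full depth
# (depth=8 -> 1024x1024). This is an illustrative assumption, not part of the original
# pipeline; the output path is a hypothetical placeholder, and torchvision is imported
# here in case it is not already imported in this script.
import torchvision

with torch.no_grad():
    z_test = torch.randn(4, 512).to(device)              # in: [-1, 512]
    x_test = netG(z_test, depth=8, alpha=1)              # out: [-1, 3, 1024, 1024]
    torchvision.utils.save_image(x_test, './netG_sanity_check.png', normalize=True)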
os.mkdir(resultPath1_1)

resultPath1_2 = resultPath + "/models"
if not os.path.exists(resultPath1_2):
    os.mkdir(resultPath1_2)

#----------------test pre-model output-----------
def toggle_grad(model, requires_grad):
    for p in model.parameters():
        p.requires_grad_(requires_grad)

netG = torch.nn.DataParallel(
    net.Generator(depth=9, latent_size=512)
)  # in: [-1,512], depth: 0-4, 1-8, 2-16, 3-32, 4-64, 5-128, 6-256, 7-512, 8-1024
netG.load_state_dict(
    torch.load('./pre-model/GAN_GEN_SHADOW_8.pth', map_location=device))  # the shadow generator weights give better results

netE = torch.nn.DataParallel(Encoder.encoder_v1(height=9, feature_size=512))
#netE.load_state_dict(torch.load('./pre-model/D2E_std_L2_ep9.pth', map_location=device))
netE.load_state_dict(
    torch.load('./pre-model/D_all_Loss_ep19.pth', map_location=device))

#-------------load single image--------------
loader = torchvision.transforms.Compose([torchvision.transforms.ToTensor()])
from PIL import Image
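# A hypothetical usage sketch for the loader above (the image path is a placeholder,
# and the image is assumed to already be 1024x1024): read one RGB image and turn it
# into a [1, 3, H, W] batch tensor on the target device, ready to feed to the encoder.
img = Image.open('./sample/test_face.png').convert('RGB')   # hypothetical path
img_tensor = loader(img).unsqueeze(0).to(device)            # [1, 3, 1024, 1024]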
# print(type(dict2))
# keys = []
# dict3 = {}
# for i, j in dict1.items():
#     if i.startswith('fc'):
#         print(i)
#         continue
#     keys.append(i)
# dict3 = {k: dict1[k] for k in keys}
# print(dict3.keys())

from pro_gan_pytorch import Encoder, Networks as net

netG1 = torch.nn.DataParallel(net.Generator(depth=9, latent_size=512)).to(
    device
)  # in: [-1,512], depth: 0-4, 1-8, 2-16, 3-32, 4-64, 5-128, 6-256, 7-512, 8-1024
#netG1.load_state_dict(torch.load('D:\\AI-Lab\\PGGAN-TA\\result\\Step2_Training_EL2_GL2\\models\\G_model_ep0.pth', map_location=device))
#netG1.load_state_dict(torch.load('./pre-model/GAN_GEN_SHADOW_8.pth', map_location=device))
#netG1.load_state_dict(torch.load('D:\\AI-Lab\\PGGAN-TA\\result\\Step2_G-allLoss-allLoss_wwm\\models\\G_model_ep3.pth', map_location=device))
netG1.load_state_dict(
    torch.load(
        'D:\\AI-Lab\\PGGAN-TA\\result\\Step2_G_wwm_allLoss\\models\\G_model_ep8.pth',
        map_location=device))

netEn = torch.nn.DataParallel(Encoder.encoder_v1(height=9, feature_size=512))
#netEn.load_state_dict(torch.load('./pre-model/D2E_std_L2_ep9.pth', map_location=device))
netEn.load_state_dict(
    torch.load('./pre-model/D_all_Loss_ep19.pth', map_location=device))

#--------------operate y -> z -> x--------------
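# A sketch of the y -> z -> x round trip announced above. This is hypothetical: it
# assumes encoder_v1 accepts (x, height, alpha) like the pro_gan_pytorch
# Discriminator, and that its latent output may carry trailing 1x1 spatial dims,
# as the squeeze in the earlier commented-out test suggests. The random tensor y
# is only a stand-in for a real image batch.
with torch.no_grad():
    y = torch.rand(1, 3, 1024, 1024).to(device)                  # stand-in for a real image batch
    z_y = netEn(y, height=8, alpha=1)                            # assumed encoder signature
    z_y = z_y.squeeze(2).squeeze(2) if z_y.dim() == 4 else z_y   # -> [-1, 512]
    x_rec = netG1(z_y, depth=8, alpha=1)                         # reconstruction: [-1, 3, 1024, 1024]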
resultPath1_2 = resultPath + "/models"
if not os.path.exists(resultPath1_2):
    os.mkdir(resultPath1_2)

#----------------pre-model-----------
def toggle_grad(model, requires_grad):
    for p in model.parameters():
        p.requires_grad_(requires_grad)

in_dim = 512
netG1 = torch.nn.DataParallel(
    net1.Generator(depth=9, latent_size=in_dim)
)  # in: [-1,512], depth: 0-4, 1-8, 2-16, 3-32, 4-64, 5-128, 6-256, 7-512, 8-1024
netG1.load_state_dict(
    torch.load('./pre-model/GAN_GEN_SHADOW_8.pth', map_location=device))  # the shadow generator weights give better results

netD1 = torch.nn.DataParallel(
    net1.Discriminator(height=9, feature_size=in_dim)
)  # in: [-1,3,1024,1024], out:[], depth: 0-4, 1-8, 2-16, 3-32, 4-64, 5-128, 6-256, 7-512, 8-1024
netD1.load_state_dict(
    torch.load('./pre-model/GAN_DIS_8.pth', map_location=device))

netG2 = torch.nn.DataParallel(net2.Decoder_v1(depth=9, latent_size=in_dim))
netD2 = torch.nn.DataParallel(net2.Encoder_v1(height=9, feature_size=in_dim))  # new architecture, no pretrained parameters to load

toggle_grad(netD1, False)
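# A hypothetical sketch of how this trainable/frozen split might be driven: only the
# new decoder/encoder pair (netG2, netD2) gets an optimizer, while the pretrained
# networks stay frozen via toggle_grad. The learning rate and betas are placeholder
# values, not taken from the original training configuration.
toggle_grad(netG2, True)
toggle_grad(netD2, True)
optimizerG2 = torch.optim.Adam(netG2.parameters(), lr=1e-4, betas=(0.0, 0.99))
optimizerD2 = torch.optim.Adam(netD2.parameters(), lr=1e-4, betas=(0.0, 0.99))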