if opt.pretrained:
    model_G = os.path.join(opt.save_folder + opt.pretrained_sr)
    model_D = os.path.join(opt.save_folder + opt.pretrained_D)
    if os.path.exists(model_G):
        # Resume the generator from a pre-trained checkpoint.
        G.load_state_dict(torch.load(model_G, map_location=lambda storage, loc: storage))
        print('Pre-trained Generator model is loaded.')
    if os.path.exists(model_D):
        # Resume the discriminator from a pre-trained checkpoint.
        D.load_state_dict(torch.load(model_D, map_location=lambda storage, loc: storage))
        print('Pre-trained Discriminator model is loaded.')

if cuda:
    # Move the networks and loss modules to the primary GPU.
    denoiser = denoiser.cuda(gpus_list[0])
    G = G.cuda(gpus_list[0])
    D = D.cuda(gpus_list[0])
    HR_feat_extractor = HR_feat_extractor.cuda(gpus_list[0])
    feat_extractor = feat_extractor.cuda(gpus_list[0])
    L1_loss = L1_loss.cuda(gpus_list[0])
    BCE_loss = BCE_loss.cuda(gpus_list[0])
    Lap_loss = Lap_loss.cuda(gpus_list[0])

G_optimizer = optim.Adam(G.parameters(), lr=opt.lr, weight_decay=0,
                         betas=(0.9, 0.999), eps=1e-8)
# The original excerpt was cut off after weight_decay=0; the remaining
# arguments are assumed to mirror G_optimizer.
D_optimizer = optim.Adam(D.parameters(), lr=opt.lr, weight_decay=0,
                         betas=(0.9, 0.999), eps=1e-8)
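# ---------------------------------------------------------------------------
# Sketch, not part of the original script: a small helper that saves the
# generator/discriminator weights to the same paths the resume logic above
# reads from. The function name checkpoint_models and the print format are
# assumptions; only opt.save_folder, opt.pretrained_sr and opt.pretrained_D
# come from the code above.
def checkpoint_models():
    G_path = os.path.join(opt.save_folder + opt.pretrained_sr)
    D_path = os.path.join(opt.save_folder + opt.pretrained_D)
    torch.save(G.state_dict(), G_path)
    torch.save(D.state_dict(), D_path)
    print('Checkpoints saved to {} and {}'.format(G_path, D_path))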
torch.cuda.manual_seed(opt.seed)

print('===> Building model ', opt.model_type)
denoiser = VAE_denoise_vali(input_dim=3, dim=32, feat_size=8, z_dim=512, prior='standard')
model = VAE_SR(input_dim=3, dim=64, scale_factor=opt.upscale_factor)
denoiser = torch.nn.DataParallel(denoiser, device_ids=gpus_list)
model = torch.nn.DataParallel(model, device_ids=gpus_list)
if cuda:
    denoiser = denoiser.cuda(gpus_list[0])
    model = model.cuda(gpus_list[0])

print('===> Loading datasets')

if os.path.exists(opt.model_denoiser):
    # denoiser.load_state_dict(torch.load(opt.model_denoiser, map_location=lambda storage, loc: storage))
    # Partial restore: keep only the checkpoint entries whose keys exist in the
    # current (DataParallel-wrapped) denoiser before loading.
    pretrained_dict = torch.load(opt.model_denoiser, map_location=lambda storage, loc: storage)
    model_dict = denoiser.state_dict()
    pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
    model_dict.update(pretrained_dict)
    denoiser.load_state_dict(model_dict)
    print('Pre-trained Denoiser model is loaded.')
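# ---------------------------------------------------------------------------
# Note (not part of the original script): PyTorch's strict=False flag gives a
# roughly equivalent partial restore, silently skipping checkpoint keys that
# the current model does not have, e.g.:
#
#     state = torch.load(opt.model_denoiser, map_location=lambda storage, loc: storage)
#     denoiser.load_state_dict(state, strict=False)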