def run(args):
    resize_dims = (256, 256)
    if args.is_cars:
        resize_dims = (192, 256)
    transform = transforms.Compose([
        transforms.Resize(resize_dims),
        transforms.ToTensor(),
        transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
    ])

    print('Loading dataset')
    dataset = GTResDataset(root_path=args.data_path,
                           gt_dir=args.gt_path,
                           transform=transform)
    dataloader = DataLoader(dataset,
                            batch_size=args.batch_size,
                            shuffle=False,
                            num_workers=int(args.workers),
                            drop_last=True)

    if args.mode == 'lpips':
        loss_func = LPIPS(net_type='alex')
    elif args.mode == 'l2':
        loss_func = torch.nn.MSELoss()
    else:
        raise Exception('Not a valid mode!')
    loss_func.cuda()

    # Score each image pair individually, keyed by the result image's filename
    global_i = 0
    scores_dict = {}
    for result_batch, gt_batch in tqdm(dataloader):
        for i in range(args.batch_size):
            loss = float(loss_func(result_batch[i:i + 1].cuda(), gt_batch[i:i + 1].cuda()))
            im_path = dataset.pairs[global_i][0]
            scores_dict[os.path.basename(im_path)] = loss
            global_i += 1

    all_scores = list(scores_dict.values())
    mean = np.mean(all_scores)
    std = np.std(all_scores)
    result_str = 'Average loss is {:.2f}+-{:.2f}'.format(mean, std)
    print('Finished with ', args.data_path)
    print(result_str)

    out_path = os.path.join(os.path.dirname(args.data_path), 'inference_metrics')
    os.makedirs(out_path, exist_ok=True)

    with open(os.path.join(out_path, 'stat_{}.txt'.format(args.mode)), 'w') as f:
        f.write(result_str)
    with open(os.path.join(out_path, 'scores_{}.json'.format(args.mode)), 'w') as f:
        json.dump(scores_dict, f)
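
# A minimal sketch of the CLI wiring this function assumes. This entry point is
# an illustration, not the repo's script; the flag names simply mirror the
# `args` attributes accessed above, and the defaults are placeholders.
if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser(description='Compute per-image LPIPS / L2 scores against ground truth')
    parser.add_argument('--mode', type=str, default='lpips', choices=['lpips', 'l2'])
    parser.add_argument('--data_path', type=str, help='Path to the generated (result) images')
    parser.add_argument('--gt_path', type=str, help='Path to the ground-truth images')
    parser.add_argument('--workers', type=int, default=4)
    parser.add_argument('--batch_size', type=int, default=4)
    parser.add_argument('--is_cars', action='store_true', help='Use the 192x256 crop for the cars domain')
    run(parser.parse_args())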
def __init__(self, opts):
    self.opts = opts
    self.global_step = 0
    self.device = 'cuda:0'  # TODO: Allow multiple GPUs? currently using CUDA_VISIBLE_DEVICES
    self.opts.device = self.device

    if self.opts.use_wandb:
        from utils.wandb_utils import WBLogger
        self.wb_logger = WBLogger(self.opts)

    # Initialize network
    self.net = pSp(self.opts).to(self.device)

    # Estimate latent_avg via dense sampling if latent_avg is not available
    if self.net.latent_avg is None:
        self.net.latent_avg = self.net.decoder.mean_latent(int(1e5))[0].detach()

    # Initialize loss
    if self.opts.id_lambda > 0 and self.opts.moco_lambda > 0:
        raise ValueError('Both ID and MoCo loss have lambdas > 0! Please select only one to have a non-zero lambda!')
    self.mse_loss = nn.MSELoss().to(self.device).eval()
    if self.opts.lpips_lambda > 0:
        self.lpips_loss = LPIPS(net_type='alex').to(self.device).eval()
    if self.opts.id_lambda > 0:
        self.id_loss = id_loss.IDLoss().to(self.device).eval()
    if self.opts.w_norm_lambda > 0:
        self.w_norm_loss = w_norm.WNormLoss(start_from_latent_avg=self.opts.start_from_latent_avg)
    if self.opts.moco_lambda > 0:
        self.moco_loss = moco_loss.MocoLoss().to(self.device).eval()

    # Initialize optimizer
    self.optimizer = self.configure_optimizers()

    # Initialize dataset
    self.train_dataset, self.test_dataset = self.configure_datasets()
    self.train_dataloader = DataLoader(self.train_dataset,
                                       batch_size=self.opts.batch_size,
                                       shuffle=True,
                                       num_workers=int(self.opts.workers),
                                       drop_last=True)
    self.test_dataloader = DataLoader(self.test_dataset,
                                      batch_size=self.opts.test_batch_size,
                                      shuffle=False,
                                      num_workers=int(self.opts.test_workers),
                                      drop_last=True)

    # Initialize logger
    log_dir = os.path.join(opts.exp_dir, 'logs')
    os.makedirs(log_dir, exist_ok=True)
    self.logger = SummaryWriter(log_dir=log_dir)

    # Initialize checkpoint dir
    self.checkpoint_dir = os.path.join(opts.exp_dir, 'checkpoints')
    os.makedirs(self.checkpoint_dir, exist_ok=True)
    self.best_val_loss = None
    if self.opts.save_interval is None:
        self.opts.save_interval = self.opts.max_steps
def __init__(self, opts):
    self.opts = opts
    self.global_step = 0
    self.device = "cuda:0"  # TODO: Allow multiple GPUs? currently using CUDA_VISIBLE_DEVICES
    self.opts.device = self.device

    # Initialize network
    self.net = pSp(self.opts).to(self.device)

    # Initialize loss
    if self.opts.lpips_lambda > 0:
        self.lpips_loss = LPIPS(net_type="alex").to(self.device).eval()
    if self.opts.id_lambda > 0 or self.opts.id_lambda_input > 0:
        self.id_loss = id_loss.IDLoss().to(self.device).eval()
    if self.opts.w_norm_lambda > 0:
        self.w_norm_loss = w_norm.WNormLoss(
            start_from_latent_avg=self.opts.start_from_latent_avg
        )
    self.mse_loss = nn.MSELoss().to(self.device).eval()

    # Initialize optimizer
    self.optimizer = self.configure_optimizers()

    # Initialize dataset
    self.train_dataset, self.test_dataset = self.configure_datasets()
    self.train_dataloader = DataLoader(
        self.train_dataset,
        batch_size=self.opts.batch_size,
        shuffle=True,
        num_workers=int(self.opts.workers),
        drop_last=True,
    )
    self.test_dataloader = DataLoader(
        self.test_dataset,
        batch_size=self.opts.test_batch_size,
        shuffle=False,
        num_workers=int(self.opts.test_workers),
        drop_last=False,
    )

    # Initialize logger
    log_dir = os.path.join(opts.exp_dir, "logs")
    os.makedirs(log_dir, exist_ok=True)
    self.logger = SummaryWriter(log_dir=log_dir)

    # Initialize checkpoint dir
    self.checkpoint_dir = os.path.join(opts.exp_dir, "checkpoints")
    os.makedirs(self.checkpoint_dir, exist_ok=True)
    self.best_val_loss = None
    if self.opts.save_interval is None:
        self.opts.save_interval = self.opts.max_steps
def __init__(self, opts):
    self.opts = opts
    self.global_step = 0
    self.device = 'cuda'
    self.opts.device = self.device

    # Initialize network
    self.net = pSp(self.opts).to(self.device)

    # Initialize loss
    self.mse_loss = nn.MSELoss().to(self.device).eval()
    if self.opts.lpips_lambda > 0:
        self.lpips_loss = LPIPS(net_type='alex').to(self.device).eval()
    if self.opts.id_lambda > 0:
        self.id_loss = id_loss.IDLoss().to(self.device).eval()
    if self.opts.w_norm_lambda > 0:
        self.w_norm_loss = w_norm.WNormLoss(opts=self.opts)
    if self.opts.aging_lambda > 0:
        self.aging_loss = AgingLoss(self.opts)

    # Initialize optimizer
    self.optimizer = self.configure_optimizers()

    # Initialize dataset
    self.train_dataset, self.test_dataset = self.configure_datasets()
    self.train_dataloader = DataLoader(self.train_dataset,
                                       batch_size=self.opts.batch_size,
                                       shuffle=True,
                                       num_workers=int(self.opts.workers),
                                       drop_last=True)
    self.test_dataloader = DataLoader(self.test_dataset,
                                      batch_size=self.opts.test_batch_size,
                                      shuffle=False,
                                      num_workers=int(self.opts.test_workers),
                                      drop_last=True)

    self.age_transformer = AgeTransformer(target_age=self.opts.target_age)

    # Initialize logger
    log_dir = os.path.join(opts.exp_dir, 'logs')
    os.makedirs(log_dir, exist_ok=True)
    self.logger = SummaryWriter(log_dir=log_dir)

    # Initialize checkpoint dir
    self.checkpoint_dir = os.path.join(opts.exp_dir, 'checkpoints')
    os.makedirs(self.checkpoint_dir, exist_ok=True)
    self.best_val_loss = None
    if self.opts.save_interval is None:
        self.opts.save_interval = self.opts.max_steps
def __init__(self, opts):
    self.opts = opts
    self.global_step = 0
    self.device = 'cuda:0'  # TODO: Allow multiple GPUs? currently using CUDA_VISIBLE_DEVICES
    self.opts.device = self.device

    # Initialize network
    self.net = FingerGen(self.opts, resize_factor=opts.generator_image_size).to(self.device)

    # Initialize loss
    if self.opts.lpips_lambda > 0:
        self.lpips_loss = LPIPS(net_type='alex').to(self.device).eval()
    if self.opts.fingernet_lambda > 0:
        self.fingernet_loss = FingerNetLoss(opts.label_nc).to(self.device).eval()
    self.mse_loss = nn.MSELoss().to(self.device).eval()

    # Initialize optimizer
    self.optimizer = self.configure_optimizers()

    # Initialize dataset
    self.train_dataset, self.test_dataset = self.configure_datasets()
    self.train_dataloader = DataLoader(self.train_dataset,
                                       batch_size=self.opts.batch_size,
                                       shuffle=True,
                                       num_workers=int(self.opts.workers),
                                       drop_last=True)
    self.test_dataloader = DataLoader(self.test_dataset,
                                      batch_size=self.opts.test_batch_size,
                                      shuffle=False,
                                      num_workers=int(self.opts.test_workers),
                                      drop_last=True)

    # Initialize logger
    log_dir = os.path.join(opts.exp_dir, 'logs')
    os.makedirs(log_dir, exist_ok=True)
    self.logger = SummaryWriter(log_dir=log_dir)

    # Initialize checkpoint dir
    self.checkpoint_dir = os.path.join(opts.exp_dir, 'checkpoints')
    os.makedirs(self.checkpoint_dir, exist_ok=True)
    self.best_val_loss = None
    if self.opts.save_interval is None:
        self.opts.save_interval = self.opts.max_steps
def __init__(self, opts, prev_train_checkpoint=None):
    self.opts = opts
    self.global_step = 0
    self.device = 'cuda:0'
    self.opts.device = self.device

    # Initialize network
    self.net = e4e(self.opts).to(self.device)

    # Estimate latent_avg via dense sampling if latent_avg is not available
    if self.net.latent_avg is None:
        self.net.latent_avg = self.net.decoder.mean_latent(int(1e5))[0].detach()

    # Get the image corresponding to the latent average
    self.avg_image = self.net(self.net.latent_avg.unsqueeze(0),
                              input_code=True,
                              randomize_noise=False,
                              return_latents=False,
                              average_code=True)[0]
    self.avg_image = self.avg_image.to(self.device).float().detach()
    if self.opts.dataset_type == "cars_encode":
        self.avg_image = self.avg_image[:, 32:224, :]
    common.tensor2im(self.avg_image).save(os.path.join(self.opts.exp_dir, 'avg_image.jpg'))

    # Initialize loss
    if self.opts.id_lambda > 0 and self.opts.moco_lambda > 0:
        raise ValueError('Both ID and MoCo loss have lambdas > 0! Please select only one to have a non-zero lambda!')
    self.mse_loss = nn.MSELoss().to(self.device).eval()
    if self.opts.lpips_lambda > 0:
        self.lpips_loss = LPIPS(net_type='alex').to(self.device).eval()
    if self.opts.id_lambda > 0:
        self.id_loss = id_loss.IDLoss().to(self.device).eval()
    if self.opts.moco_lambda > 0:
        self.moco_loss = moco_loss.MocoLoss()

    # Initialize optimizer
    self.optimizer = self.configure_optimizers()

    # Initialize discriminator
    if self.opts.w_discriminator_lambda > 0:
        self.discriminator = LatentCodesDiscriminator(512, 4).to(self.device)
        self.discriminator_optimizer = torch.optim.Adam(list(self.discriminator.parameters()),
                                                        lr=opts.w_discriminator_lr)
        self.real_w_pool = LatentCodesPool(self.opts.w_pool_size)
        self.fake_w_pool = LatentCodesPool(self.opts.w_pool_size)

    # Initialize dataset
    self.train_dataset, self.test_dataset = self.configure_datasets()
    self.train_dataloader = DataLoader(self.train_dataset,
                                       batch_size=self.opts.batch_size,
                                       shuffle=True,
                                       num_workers=int(self.opts.workers),
                                       drop_last=True)
    self.test_dataloader = DataLoader(self.test_dataset,
                                      batch_size=self.opts.test_batch_size,
                                      shuffle=False,
                                      num_workers=int(self.opts.test_workers),
                                      drop_last=True)

    # Initialize logger
    log_dir = os.path.join(opts.exp_dir, 'logs')
    os.makedirs(log_dir, exist_ok=True)
    self.logger = SummaryWriter(log_dir=log_dir)

    # Initialize checkpoint dir
    self.checkpoint_dir = os.path.join(opts.exp_dir, 'checkpoints')
    os.makedirs(self.checkpoint_dir, exist_ok=True)
    self.best_val_loss = None
    if self.opts.save_interval is None:
        self.opts.save_interval = self.opts.max_steps

    if prev_train_checkpoint is not None:
        self.load_from_train_checkpoint(prev_train_checkpoint)
        prev_train_checkpoint = None
def __init__(self, opts):
    self.opts = opts
    self.global_step = 0
    self.device = 'cuda:0'  # TODO: Allow multiple GPUs? currently using CUDA_VISIBLE_DEVICES
    self.opts.device = self.device

    # Initialize network
    self.net = pSp(self.opts).to(self.device)

    # Get the image corresponding to the latent average
    self.avg_image = self.net(self.net.latent_avg.unsqueeze(0),
                              input_code=True,
                              randomize_noise=False,
                              return_latents=False,
                              average_code=True)[0]
    self.avg_image = self.avg_image.to(self.device).float().detach()
    if self.opts.dataset_type == "cars_encode":
        self.avg_image = self.avg_image[:, 32:224, :]
    common.tensor2im(self.avg_image).save(os.path.join(self.opts.exp_dir, 'avg_image.jpg'))

    # Initialize loss
    if self.opts.id_lambda > 0 and self.opts.moco_lambda > 0:
        raise ValueError('Both ID and MoCo loss have lambdas > 0! Please select only one to have a non-zero lambda!')
    self.mse_loss = nn.MSELoss().to(self.device).eval()
    if self.opts.lpips_lambda > 0:
        self.lpips_loss = LPIPS(net_type='alex').to(self.device).eval()
    if self.opts.id_lambda > 0:
        self.id_loss = id_loss.IDLoss().to(self.device).eval()
    if self.opts.w_norm_lambda > 0:
        self.w_norm_loss = w_norm.WNormLoss(start_from_latent_avg=self.opts.start_from_latent_avg)
    if self.opts.moco_lambda > 0:
        self.moco_loss = moco_loss.MocoLoss()

    # Initialize optimizer
    self.optimizer = self.configure_optimizers()

    # Initialize dataset
    self.train_dataset, self.test_dataset = self.configure_datasets()
    self.train_dataloader = DataLoader(self.train_dataset,
                                       batch_size=self.opts.batch_size,
                                       shuffle=True,
                                       num_workers=int(self.opts.workers),
                                       drop_last=True)
    self.test_dataloader = DataLoader(self.test_dataset,
                                      batch_size=self.opts.test_batch_size,
                                      shuffle=False,
                                      num_workers=int(self.opts.test_workers),
                                      drop_last=True)

    # Initialize logger
    log_dir = os.path.join(opts.exp_dir, 'logs')
    os.makedirs(log_dir, exist_ok=True)
    self.logger = SummaryWriter(log_dir=log_dir)

    # Initialize checkpoint dir
    self.checkpoint_dir = os.path.join(opts.exp_dir, 'checkpoints')
    os.makedirs(self.checkpoint_dir, exist_ok=True)
    self.best_val_loss = None
    if self.opts.save_interval is None:
        self.opts.save_interval = self.opts.max_steps
def __init__(self, opts, prev_train_checkpoint=None):
    self.opts = opts
    self.global_step = 0
    self.device = 'cuda:0'
    self.opts.device = self.device

    # Initialize network
    self.net = pSp(self.opts).to(self.device)

    # Initialize loss
    if self.opts.lpips_lambda > 0:
        self.lpips_loss = LPIPS(net_type=self.opts.lpips_type).to(self.device).eval()
    if self.opts.id_lambda > 0:
        if 'ffhq' in self.opts.dataset_type or 'celeb' in self.opts.dataset_type:
            self.id_loss = id_loss.IDLoss().to(self.device).eval()
        else:
            self.id_loss = moco_loss.MocoLoss(opts).to(self.device).eval()
    self.mse_loss = nn.MSELoss().to(self.device).eval()

    # Initialize optimizer
    self.optimizer = self.configure_optimizers()

    # Initialize discriminator
    if self.opts.w_discriminator_lambda > 0:
        self.discriminator = LatentCodesDiscriminator(512, 4).to(self.device)
        self.discriminator_optimizer = torch.optim.Adam(list(self.discriminator.parameters()),
                                                        lr=opts.w_discriminator_lr)
        self.real_w_pool = LatentCodesPool(self.opts.w_pool_size)
        self.fake_w_pool = LatentCodesPool(self.opts.w_pool_size)

    # Initialize dataset
    self.train_dataset, self.test_dataset = self.configure_datasets()
    self.train_dataloader = DataLoader(self.train_dataset,
                                       batch_size=self.opts.batch_size,
                                       shuffle=True,
                                       num_workers=int(self.opts.workers),
                                       drop_last=True)
    self.test_dataloader = DataLoader(self.test_dataset,
                                      batch_size=self.opts.test_batch_size,
                                      shuffle=False,
                                      num_workers=int(self.opts.test_workers),
                                      drop_last=True)

    # Initialize logger
    log_dir = os.path.join(opts.exp_dir, 'logs')
    os.makedirs(log_dir, exist_ok=True)
    self.logger = SummaryWriter(log_dir=log_dir)

    # Initialize checkpoint dir
    self.checkpoint_dir = os.path.join(opts.exp_dir, 'checkpoints')
    os.makedirs(self.checkpoint_dir, exist_ok=True)
    self.best_val_loss = None
    if self.opts.save_interval is None:
        self.opts.save_interval = self.opts.max_steps

    if prev_train_checkpoint is not None:
        self.load_from_train_checkpoint(prev_train_checkpoint)
        prev_train_checkpoint = None
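
# A minimal usage sketch for the trainer constructors above. This is an
# assumption, not the repos' actual entry point: the class name `Coach` and its
# `train()` method are hypothetical stand-ins, and the option values shown are
# placeholders covering only the attributes these constructors read.
if __name__ == '__main__':
    from argparse import Namespace

    opts = Namespace(exp_dir='experiments/run_01',
                     batch_size=8, test_batch_size=8,
                     workers=4, test_workers=4,
                     lpips_lambda=0.8, id_lambda=0.1, moco_lambda=0,
                     w_norm_lambda=0.005, start_from_latent_avg=True,
                     save_interval=None, max_steps=500000)
    coach = Coach(opts)  # hypothetical class wrapping one of the __init__ variants above
    coach.train()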