import argparse
import math

import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader

# Helpers assumed to come from the BasicSR package (basicsr); import paths follow its layout.
from basicsr.archs.stylegan2_arch import StyleGAN2Generator
from basicsr.data import build_dataset
from basicsr.metrics.fid import calculate_fid, extract_inception_features, load_patched_inception_v3


def calculate_stylegan2_fid():
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    parser = argparse.ArgumentParser()
    parser.add_argument('ckpt', type=str, help='Path to the stylegan2 checkpoint.')
    parser.add_argument('fid_stats', type=str, help='Path to the dataset fid statistics.')
    parser.add_argument('--size', type=int, default=256)
    parser.add_argument('--channel_multiplier', type=int, default=2)
    parser.add_argument('--batch_size', type=int, default=64)
    parser.add_argument('--num_sample', type=int, default=50000)
    parser.add_argument('--truncation', type=float, default=1)
    parser.add_argument('--truncation_mean', type=int, default=4096)
    args = parser.parse_args()

    # create the stylegan2 generator and load the EMA weights
    generator = StyleGAN2Generator(
        out_size=args.size,
        num_style_feat=512,
        num_mlp=8,
        channel_multiplier=args.channel_multiplier,
        resample_kernel=(1, 3, 3, 1))
    generator.load_state_dict(torch.load(args.ckpt)['params_ema'])
    generator = nn.DataParallel(generator).eval().to(device)

    if args.truncation < 1:
        with torch.no_grad():
            # go through .module: nn.DataParallel does not expose the wrapped model's mean_latent method
            truncation_latent = generator.module.mean_latent(args.truncation_mean)
    else:
        truncation_latent = None

    # inception model for feature extraction
    inception = load_patched_inception_v3(device)

    total_batch = math.ceil(args.num_sample / args.batch_size)

    def sample_generator(total_batch):
        # yield batches of generated images
        for _ in range(total_batch):
            with torch.no_grad():
                latent = torch.randn(args.batch_size, 512, device=device)
                samples, _ = generator([latent], truncation=args.truncation, truncation_latent=truncation_latent)
            yield samples

    features = extract_inception_features(sample_generator(total_batch), inception, total_batch, device)
    features = features.numpy()
    total_len = features.shape[0]
    features = features[:args.num_sample]
    print(f'Extracted {total_len} features, use the first {features.shape[0]} features to calculate stats.')

    sample_mean = np.mean(features, 0)
    sample_cov = np.cov(features, rowvar=False)

    # load the pre-computed dataset stats
    stats = torch.load(args.fid_stats)
    real_mean = stats['mean']
    real_cov = stats['cov']

    # calculate FID metric
    fid = calculate_fid(sample_mean, sample_cov, real_mean, real_cov)
    print('fid:', fid)
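# Usage sketch (an assumption, not part of the original script): the function above
# is a CLI entry point, so it would typically be wired up under a __main__ guard and
# invoked with placeholder paths like these.
#
#   if __name__ == '__main__':
#       calculate_stylegan2_fid()
#
#   python calculate_stylegan2_fid.py /path/to/stylegan2_ckpt.pth /path/to/fid_stats.pth \
#       --size 256 --batch_size 64 --num_sample 50000

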
def calculate_fid_folder():
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    parser = argparse.ArgumentParser()
    parser.add_argument('folder', type=str, help='Path to the folder.')
    parser.add_argument('--fid_stats', type=str, help='Path to the dataset fid statistics.')
    parser.add_argument('--batch_size', type=int, default=64)
    parser.add_argument('--num_sample', type=int, default=50000)
    parser.add_argument('--num_workers', type=int, default=4)
    parser.add_argument('--backend', type=str, default='disk', help='io backend for dataset. Option: disk, lmdb')
    args = parser.parse_args()

    # inception model for feature extraction
    inception = load_patched_inception_v3(device)

    # create a dataset that reads images from the folder
    opt = {}
    opt['name'] = 'SingleImageDataset'
    opt['type'] = 'SingleImageDataset'
    opt['dataroot_lq'] = args.folder
    opt['io_backend'] = dict(type=args.backend)
    opt['mean'] = [0.5, 0.5, 0.5]
    opt['std'] = [0.5, 0.5, 0.5]
    dataset = build_dataset(opt)

    # create dataloader
    data_loader = DataLoader(
        dataset=dataset,
        batch_size=args.batch_size,
        shuffle=False,
        num_workers=args.num_workers,
        sampler=None,
        drop_last=False)
    args.num_sample = min(args.num_sample, len(dataset))
    total_batch = math.ceil(args.num_sample / args.batch_size)

    def data_generator(data_loader, total_batch):
        # yield batches of images loaded from the folder
        for idx, data in enumerate(data_loader):
            if idx >= total_batch:
                break
            else:
                yield data['lq']

    features = extract_inception_features(data_generator(data_loader, total_batch), inception, total_batch, device)
    features = features.numpy()
    total_len = features.shape[0]
    features = features[:args.num_sample]
    print(f'Extracted {total_len} features, use the first {features.shape[0]} features to calculate stats.')

    sample_mean = np.mean(features, 0)
    sample_cov = np.cov(features, rowvar=False)

    # load the pre-computed dataset stats
    stats = torch.load(args.fid_stats)
    real_mean = stats['mean']
    real_cov = stats['cov']

    # calculate FID metric
    fid = calculate_fid(sample_mean, sample_cov, real_mean, real_cov)
    print('fid:', fid)
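
# Both functions load the reference statistics from a file containing a dict with
# 'mean' and 'cov' entries. A minimal sketch of how such a file could be produced
# from already-extracted Inception features; save_fid_stats is a hypothetical helper
# and the save path is a placeholder, neither is part of the original scripts.
def save_fid_stats(features, save_path):
    """Compute the feature mean/covariance and save them in the format expected above."""
    mean = np.mean(features, 0)
    cov = np.cov(features, rowvar=False)
    torch.save({'mean': mean, 'cov': cov}, save_path)


# Example (hypothetical path): save_fid_stats(features, '/path/to/fid_stats.pth')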