# Shared imports for the run() variants below. `utils` and `inception_utils`
# are the surrounding project's own modules; variant-specific symbols
# (global_cfg, FFHQ, CocoAnimals) are flagged where they are used.
import os
import logging

import numpy as np
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torchvision import transforms
from tqdm import tqdm

import inception_utils
import utils


# Variant 1: accumulate pool features and softmax outputs over the full
# dataset, report the Inception Score, and save FID moments to disk.
def run(config):
    # Get loader
    config['drop_last'] = False
    loaders = utils.get_data_loaders(**config)

    # Load inception net
    net = inception_utils.load_inception_net(parallel=config['parallel'])
    pool, logits, labels = [], [], []
    device = 'cuda'
    for i, (x, y) in enumerate(tqdm(loaders[0])):
        x = x.to(device)
        with torch.no_grad():
            pool_val, logits_val = net(x)
            pool += [np.asarray(pool_val.cpu())]
            logits += [np.asarray(F.softmax(logits_val, 1).cpu())]
            labels += [np.asarray(y.cpu())]

    pool, logits, labels = [np.concatenate(item, 0)
                            for item in [pool, logits, labels]]
    # uncomment to save pool, logits, and labels to disk
    # print('Saving pool, logits, and labels to disk...')
    # np.savez(config['dataset'] + '_inception_activations.npz',
    #          **{'pool': pool, 'logits': logits, 'labels': labels})

    # Calculate inception metrics and report them
    print('Calculating inception metrics...')
    IS_mean, IS_std = inception_utils.calculate_inception_score(logits)
    print('Training data from dataset %s has IS of %5.5f +/- %5.5f'
          % (config['dataset'], IS_mean, IS_std))

    # Prepare mu and sigma and save them to disk. Drop the "_hdf5" suffix
    # from the dataset name (the FID code also knows to strip "_hdf5").
    # Note str.strip removes a character *set*, not a suffix, so replace()
    # is used here.
    print('Calculating means and covariances...')
    mu, sigma = np.mean(pool, axis=0), np.cov(pool, rowvar=False)
    print('Saving calculated means and covariances to disk...')
    np.savez(config['dataset'].replace('_hdf5', '') + '_inception_moments.npz',
             **{'mu': mu, 'sigma': sigma})
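
# `inception_utils.calculate_inception_score` is not shown in this file.
# For orientation, a minimal sketch of the standard Inception Score
# computation over the accumulated softmax outputs might look like the
# following; the 10-split averaging is an assumption borrowed from the
# common IS recipe, and the project's utility may differ in its defaults.
def inception_score_sketch(probs, num_splits=10):
    """IS = exp(E_x[KL(p(y|x) || p(y))]), averaged over `num_splits` chunks.

    `probs` is an (N, num_classes) array of softmax outputs, shaped like
    the concatenated `logits` array above.
    """
    scores = []
    for split in np.array_split(probs, num_splits):
        p_y = np.mean(split, axis=0, keepdims=True)                 # marginal p(y)
        kl = split * (np.log(split + 1e-10) - np.log(p_y + 1e-10))  # per-sample KL
        scores.append(np.exp(np.mean(np.sum(kl, axis=1))))
    return np.mean(scores), np.std(scores)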
# Variant 2: same pipeline, but logs through a `tl` logger and saves the
# moments to a path taken from the project's global config.
def run(config):
    logger = logging.getLogger('tl')
    # `global_cfg` comes from the surrounding project's config system and
    # is assumed to be importable in this module.
    saved_inception_moments = global_cfg.saved_inception_moments.format(
        config['dataset'])

    # Get loader
    config['drop_last'] = False
    loaders = utils.get_data_loaders(use_data_root=True, **config)

    # Load inception net
    net = inception_utils.load_inception_net(parallel=config['parallel'])
    net.eval()
    pool, logits, labels = [], [], []
    device = 'cuda'
    pbar = tqdm(loaders[0], desc='accumulate pool and logits')
    for i, (x, y) in enumerate(pbar):
        x = x.to(device)
        with torch.no_grad():
            pool_val, logits_val = net(x)
            pool += [np.asarray(pool_val.cpu())]
            logits += [np.asarray(F.softmax(logits_val, 1).cpu())]
            labels += [np.asarray(y.cpu())]

    pool, logits, labels = [np.concatenate(item, 0)
                            for item in [pool, logits, labels]]
    # uncomment to save pool, logits, and labels to disk
    # print('Saving pool, logits, and labels to disk...')
    # np.savez(config['dataset'] + '_inception_activations.npz',
    #          **{'pool': pool, 'logits': logits, 'labels': labels})

    # Calculate inception metrics and report them
    logger.info('Calculating inception metrics...')
    IS_mean, IS_std = inception_utils.calculate_inception_score(logits)
    logger.info('Training data from dataset %s has IS of %5.5f +/- %5.5f'
                % (config['dataset'], IS_mean, IS_std))

    # Prepare mu and sigma and save them to the configured path.
    logger.info('Calculating means and covariances...')
    mu, sigma = np.mean(pool, axis=0), np.cov(pool, rowvar=False)
    logger.info('Saving calculated means and covariances to disk...')
    logger.info(f'Save to {saved_inception_moments}')
    os.makedirs(os.path.dirname(saved_inception_moments), exist_ok=True)
    np.savez(saved_inception_moments, **{'mu': mu, 'sigma': sigma})
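
# The saved mu/sigma are only half of an FID computation. A minimal sketch
# of how such a moments file is typically consumed (the Frechet distance
# between two Gaussians); the generated-side file name in the usage note
# below is hypothetical, for illustration only.
from scipy import linalg

def frechet_distance_sketch(mu1, sigma1, mu2, sigma2, eps=1e-6):
    """FID = ||mu1 - mu2||^2 + Tr(S1 + S2 - 2 * sqrt(S1 @ S2))."""
    diff = mu1 - mu2
    covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
    if not np.isfinite(covmean).all():
        # jitter the diagonals if the product is near-singular
        offset = np.eye(sigma1.shape[0]) * eps
        covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))
    if np.iscomplexobj(covmean):
        covmean = covmean.real
    return (diff.dot(diff) + np.trace(sigma1) + np.trace(sigma2)
            - 2 * np.trace(covmean))

# usage, assuming a second moments file was computed for generated samples:
# real = np.load('I128_inception_moments.npz')
# fake = np.load('samples_inception_moments.npz')  # hypothetical file
# fid = frechet_distance_sketch(real['mu'], real['sigma'],
#                               fake['mu'], fake['sigma'])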
# Variant 3: accumulate only pool features for the first 10k samples and
# save them as `real_features` for a PRDC computation.
def run(config):
    # Get loader
    config['drop_last'] = False
    loaders = utils.get_data_loaders(**config)

    # Load inception net
    net = inception_utils.load_inception_net(parallel=config['parallel'])
    pool = []
    device = 'cuda'
    for i, (x, y) in enumerate(tqdm(loaders[0],
                                    total=10000 // config['batch_size'])):
        x = x.to(device)
        with torch.no_grad():
            pool_val, logits_val = net(x)
            pool += [np.asarray(pool_val.cpu())]
        if i * config['batch_size'] > 10000:
            break

    pool = np.concatenate(pool, 0)[:10000]  # only need 10k samples for PRDC
    np.savez(config['dataset'].replace('_hdf5', '') + '_inception_activations.npz',
             real_features=pool)
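
# The `real_features` array saved above feeds a precision/recall/density/
# coverage computation. If the `prdc` package ("Reliable Fidelity and
# Diversity Metrics", pip install prdc) is the intended consumer, usage
# might look like this sketch; how the fake features are obtained is left
# to the caller.
def prdc_from_saved_features_sketch(real_path, fake_features, nearest_k=5):
    """Sketch: compute precision/recall/density/coverage with `prdc`
    from the features saved above. k=5 is the paper's default."""
    from prdc import compute_prdc
    real_features = np.load(real_path)['real_features']
    return compute_prdc(real_features=real_features,
                        fake_features=fake_features,
                        nearest_k=nearest_k)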
# Variant 4: build dataset-specific loaders for FFHQ and COCO animals,
# cap accumulation at 50k samples (looping over the data at most twice),
# then report IS and save FID moments. `FFHQ` and `CocoAnimals` are
# project-specific dataset classes assumed to be importable here.
def run(config):
    # Get loader
    config['drop_last'] = False
    if config["dataset"] == "FFHQ":
        imsize = 256
        root = os.path.join(os.environ["SSD"], "images256x256")
        root_perm = os.path.join(os.environ["SSD"], "images256x256")
        transform = transforms.Compose([
            transforms.Resize(imsize),  # torchvision's Scale was renamed Resize
            transforms.CenterCrop(imsize),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]),
        ])
        batch_size = 100  # config['batch_size']
        dataset = FFHQ(root=root, transform=transform, test_mode=False)
        data_loader = DataLoader(dataset, batch_size, shuffle=True, drop_last=True)
        loaders = [data_loader]
    elif config["dataset"] == "coco":
        imsize = 128
        batch_size = config['batch_size']
        transform = transforms.Compose([
            transforms.Resize(imsize),
            transforms.CenterCrop(imsize),
            # transforms.RandomHorizontalFlip(),
            # transforms.ColorJitter(brightness=0.01, contrast=0.01,
            #                        saturation=0.01, hue=0.01),
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
        ])
        classes = ['bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
                   'elephant', 'monkey', 'zebra', 'giraffe']
        root = None
        root_perm = None
        dataset = CocoAnimals(root=root, batch_size=batch_size, classes=classes,
                              transform=transform, masks=False, return_all=True,
                              test_mode=False, imsize=imsize)
        data_loader = DataLoader(dataset, batch_size, drop_last=True,
                                 num_workers=1, shuffle=True)
        loaders = [data_loader]
    else:
        loaders = utils.get_data_loaders(**config)

    # Load inception net
    net = inception_utils.load_inception_net(parallel=config['parallel'])
    pool, logits, labels = [], [], []
    device = 'cuda'
    used_samples = 0
    for e in range(2):
        for i, batch_data in enumerate(tqdm(loaders[0])):
            x, y = batch_data[0], batch_data[1]
            x = x.to(device)
            with torch.no_grad():
                pool_val, logits_val = net(x)
                pool += [np.asarray(pool_val.cpu())]
                logits += [np.asarray(F.softmax(logits_val, 1).cpu())]
                labels += [np.asarray(y.cpu())]
            used_samples += x.size(0)
            if used_samples >= 50000:
                break
        if used_samples >= 50000:  # also stop the epoch loop once 50k is reached
            break

    pool, logits, labels = [np.concatenate(item, 0)
                            for item in [pool, logits, labels]]
    # uncomment to save pool, logits, and labels to disk
    # print('Saving pool, logits, and labels to disk...')
    # np.savez(config['dataset'] + '_inception_activations.npz',
    #          **{'pool': pool, 'logits': logits, 'labels': labels})

    # Calculate inception metrics and report them
    print('Calculating inception metrics...')
    IS_mean, IS_std = inception_utils.calculate_inception_score(logits)
    print('Training data from dataset %s has IS of %5.5f +/- %5.5f'
          % (config['dataset'], IS_mean, IS_std))

    # Prepare mu and sigma and save them to disk, dropping the "_hdf5" suffix.
    print('Calculating means and covariances...')
    print(pool.shape)
    mu, sigma = np.mean(pool, axis=0), np.cov(pool, rowvar=False)
    print('Saving calculated means and covariances to disk...')
    np.savez(config['dataset'].replace('_hdf5', '') + '_inception_moments.npz',
             **{'mu': mu, 'sigma': sigma})
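
# `FFHQ` above is a project-specific class that is not shown. A minimal
# sketch of what an FFHQ-style flat image-folder dataset could look like,
# assuming images256x256 is a flat directory of image files; this is a
# hypothetical stand-in, not the project's actual implementation.
from PIL import Image
from torch.utils.data import Dataset

class FlatImageFolderSketch(Dataset):
    """Flat directory of images with a constant dummy label."""
    def __init__(self, root, transform=None):
        self.paths = sorted(
            os.path.join(root, f) for f in os.listdir(root)
            if f.lower().endswith(('.png', '.jpg', '.jpeg')))
        self.transform = transform

    def __len__(self):
        return len(self.paths)

    def __getitem__(self, idx):
        img = Image.open(self.paths[idx]).convert('RGB')
        if self.transform is not None:
            img = self.transform(img)
        return img, 0  # unconditional data: every sample gets label 0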
# Variant 5: per-class moments. Assumes the loader yields samples sorted
# by class label, detects the transition to the next class id inside a
# batch, and writes one moments file per class.
def run(config):
    # Get loader
    config['drop_last'] = False
    loaders = utils.get_data_loaders(**config)

    # Load inception net
    net = inception_utils.load_inception_net(parallel=config['parallel'])
    pool, logits, labels = [], [], []
    device = 'cuda'
    init = 0  # class id currently being accumulated
    for i, (x, y) in enumerate(tqdm(loaders[0])):
        x = x.to(device)
        with torch.no_grad():
            pool_val, logits_val = net(x)
            if torch.sum(y == (init + 1)) > 0:
                # the next class starts inside this batch: finish off the
                # current class with this batch's remaining samples of it
                init += 1
                print(y[0], init)  # debug trace of the class transition
                pool += [np.asarray(pool_val.cpu()[y == (init - 1)])]
                logits += [np.asarray(
                    F.softmax(logits_val[y == (init - 1)], 1).cpu())]
                labels += [np.asarray(y.cpu()[y == (init - 1)])]
                pool, logits, labels = [np.concatenate(item, 0)
                                        for item in [pool, logits, labels]]

                print('Calculating inception metrics...')
                IS_mean, IS_std = inception_utils.calculate_inception_score(logits)
                print('Training data from dataset %s has IS of %5.5f +/- %5.5f'
                      % (config['dataset'], IS_mean, IS_std))
                print('Calculating means and covariances...')
                mu, sigma = np.mean(pool, axis=0), np.cov(pool, rowvar=False)
                print('Saving calculated means and covariances to disk...')
                np.savez('../inception_moment/' + str(init - 1) + '_'
                         + config['dataset'].replace('_hdf5', '')
                         + '_inception_moments.npz',
                         **{'mu': mu, 'sigma': sigma})

                # start accumulating the new class, reusing the activations
                # already computed for this batch
                pool, logits, labels = [], [], []
                pool += [np.asarray(pool_val.cpu()[y == init])]
                logits += [np.asarray(F.softmax(logits_val[y == init], 1).cpu())]
                labels += [np.asarray(y.cpu()[y == init])]
            else:
                pool += [np.asarray(pool_val.cpu())]
                logits += [np.asarray(F.softmax(logits_val, 1).cpu())]
                labels += [np.asarray(y.cpu())]

    # the samples left over after the loop belong to the last class, `init`
    pool, logits, labels = [np.concatenate(item, 0)
                            for item in [pool, logits, labels]]
    print('Calculating inception metrics...')
    IS_mean, IS_std = inception_utils.calculate_inception_score(logits)
    print('Training data from dataset %s has IS of %5.5f +/- %5.5f'
          % (config['dataset'], IS_mean, IS_std))
    print('Calculating means and covariances...')
    mu, sigma = np.mean(pool, axis=0), np.cov(pool, rowvar=False)
    print('Saving calculated means and covariances to disk...')
    np.savez('../inception_moment/' + str(init) + '_'
             + config['dataset'].replace('_hdf5', '')
             + '_inception_moments.npz',
             **{'mu': mu, 'sigma': sigma})
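
# The per-class files written above are the ingredients of an intra-FID
# (per-class FID averaged over classes). A minimal sketch, assuming
# matching per-class moment files were also computed for generated
# samples; the fake-side file pattern and class count are hypothetical.
def intra_fid_sketch(dataset_name, num_classes):
    from scipy import linalg

    def frechet(mu1, s1, mu2, s2):
        covmean = linalg.sqrtm(s1.dot(s2), disp=False)[0].real
        d = mu1 - mu2
        return d.dot(d) + np.trace(s1) + np.trace(s2) - 2 * np.trace(covmean)

    fids = []
    for c in range(num_classes):
        real = np.load('../inception_moment/%d_%s_inception_moments.npz'
                       % (c, dataset_name))
        fake = np.load('../inception_moment/%d_%s_fake_moments.npz'
                       % (c, dataset_name))  # hypothetical generated-side file
        fids.append(frechet(real['mu'], real['sigma'],
                            fake['mu'], fake['sigma']))
    return float(np.mean(fids))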