def evaluate(loader, nz, domain, sem, mapping, generator, classifier, device):
    """Translate every batch in ``loader`` to target domain ``domain`` and
    return classifier accuracy on the translated images.

    Args:
        loader: DataLoader yielding ``(data, label)`` batches; images are
            assumed to be in ``[0, 1]`` (they are rescaled below — TODO confirm).
        nz: Dimensionality of the latent style code.
        domain: Integer index of the target domain.
        sem: Semantic network producing pseudo-labels; expects ``[0, 1]`` input.
        mapping: Mapping network ``(z, y, domain) -> style code``.
        generator: Image-to-image generator ``(image, style) -> image``.
        classifier: Classifier evaluated on the generated images.
        device: Torch device string (e.g. ``'cuda'``).

    Returns:
        Scalar accuracy as a NumPy value.
    """
    correct = 0
    total = 0
    # Fix: inference must run under no_grad — otherwise every batch retains
    # its autograd graph, wasting (and possibly exhausting) GPU memory.
    with torch.no_grad():
        for data, label in loader:
            data = data * 2 - 1  # rescale [0, 1] -> [-1, 1] (generator input range)
            n = len(data)
            d_trg = torch.tensor(domain).repeat(n).long().to(device)
            data, label = data.to(device), label.to(device)
            # Pseudo-labels from the semantic net, which expects [0, 1] input.
            y = sem((data + 1) * 0.5).argmax(1)
            z = torch.randn(n, nz).to(device)
            s = mapping(z, y, d_trg)
            gen = generator(data, s)
            gen = normalize(gen)
            pred = F.softmax(classifier(gen), 1).argmax(1)
            correct += (pred == label).sum().cpu().float()
            total += len(pred)
    # `correct` is already a CPU tensor; no second .cpu() needed.
    accuracy = (correct / total).numpy()
    print(accuracy)
    # Debug dump of the LAST processed batch only.
    save_image(normalize(data), 'data.png')
    save_image(gen, 'gen.png')
    return accuracy
def execute(args):
    """Compute FID between target-domain translations of the source dataset
    and the real target dataset, saving sample grids along the way.

    Args:
        args: Namespace with ``state_dict_path``, ``bottleneck_size``,
            ``img_size``, ``max_conv_dim``, ``dataset_src``/``data_root_src``,
            ``dataset_tgt``/``data_root_tgt`` and ``domain``.
    """
    device = 'cuda'
    latent_dim = 16
    batch_size = 128

    # Load model weights (both nets live in one checkpoint dict).
    state_dict = torch.load(args.state_dict_path, map_location='cpu')
    generator = Generator(bottleneck_size=args.bottleneck_size,
                          bottleneck_blocks=4,
                          img_size=args.img_size,
                          max_conv_dim=args.max_conv_dim).to(device)
    generator.load_state_dict(state_dict['generator'])
    mapping = MappingNetwork()
    mapping.load_state_dict(state_dict['mapping_network'])
    mapping.to(device)
    mapping.eval()
    generator.eval()

    dataset = getattr(images, args.dataset_src)(args.data_root_src)
    src = torch.utils.data.DataLoader(dataset, batch_size=batch_size, num_workers=10)
    dataset = getattr(images, args.dataset_tgt)(args.data_root_tgt)
    trg = torch.utils.data.DataLoader(dataset, batch_size=batch_size, num_workers=10)
    print(f'Src size: {len(src)}, Tgt size: {len(trg)}')

    generated = []
    print('Fetching generated data')
    d = torch.tensor(args.domain).repeat(batch_size).long().to(device)
    # Fix: run the whole evaluation under no_grad — without it each generated
    # batch keeps its autograd graph alive while being accumulated, which
    # blows up GPU memory on any non-trivial dataset.
    with torch.no_grad():
        for data in src:
            data = data.to(device)
            d_trg = d[:data.shape[0]]  # last batch may be smaller than batch_size
            for _ in range(10):  # 10 style samples per source image
                z_trg = torch.randn(data.shape[0], latent_dim, device=device)
                s_trg = mapping(z_trg, d_trg)
                gen = generator(data, s_trg)
                generated.append(gen)
        generated = torch.cat(generated)
        generated = normalize(generated)

        print('Fetching target data')
        trg_data = [batch.to(device) for batch in trg]
        trg_data = torch.cat(trg_data)
        trg_data = normalize(trg_data)

    print(generated.shape, generated.min(), generated.max(),
          trg_data.shape, trg_data.min(), trg_data.max())
    save_image(generated[:100], 'gen.png')
    save_image(trg_data[:100], 'trg.png')
    computed_fid = fid.calculate_fid(trg_data, generated, 256, device, 2048)
    print(f'FID: {computed_fid}')
def execute(args):
    """Compute FID for a semantics-conditioned generator (style codes depend
    on pseudo-labels from ``sem``) and record the result via ``save_result``.

    Args:
        args: Namespace with ``save_path``, ``img_size``, ``nc``, ``ss_path``,
            ``model_type``, ``da_path``, ``dataset_src``/``data_root_src``,
            ``dataset_tgt``/``data_root_tgt``, ``domain`` and ``identifier``.
    """
    device = 'cuda'
    latent_dim = 16
    batch_size = 128

    # Load the most recent EMA checkpoint from the run directory.
    save_path = args.save_path
    state_dict_path = get_last_model('nets_ema', save_path)
    state_dict = torch.load(state_dict_path, map_location='cpu')
    bottleneck_size = get_args(save_path)['bottleneck_size']
    generator = Generator(bottleneck_size=bottleneck_size,
                          bottleneck_blocks=4,
                          img_size=args.img_size).to(device)
    generator.load_state_dict(state_dict['generator'])
    mapping = MappingNetwork(nc=args.nc)
    mapping.load_state_dict(state_dict['mapping_network'])
    mapping.to(device)
    # Fix: put the nets in eval mode, consistent with the other FID script —
    # otherwise norm/dropout layers would run in training mode here.
    mapping.eval()
    generator.eval()
    sem = semantics(args.ss_path, args.model_type, args.da_path,
                    nc=args.nc, shape=[3, args.img_size]).to(device)
    sem.eval()

    dataset = getattr(images, args.dataset_src)(args.data_root_src)
    src = torch.utils.data.DataLoader(dataset, batch_size=batch_size, num_workers=10)
    dataset = getattr(images, args.dataset_tgt)(args.data_root_tgt)
    trg = torch.utils.data.DataLoader(dataset, batch_size=batch_size, num_workers=10)
    print(f'Src size: {len(src)}, Tgt size: {len(trg)}')

    generated = []
    d = torch.tensor(args.domain).repeat(batch_size).long().to(device)
    # Fix: evaluate under no_grad — accumulating generator outputs with live
    # autograd graphs exhausts GPU memory on any non-trivial dataset.
    with torch.no_grad():
        for data in src:
            data = data.to(device)
            d_trg = d[:data.shape[0]]  # last batch may be smaller than batch_size
            # Pseudo-labels from the semantic net, which expects [0, 1] input.
            y_trg = sem((data + 1) * 0.5).argmax(1)
            for _ in range(5):  # 5 style samples per source image
                z_trg = torch.randn(data.shape[0], latent_dim, device=device)
                s_trg = mapping(z_trg, y_trg, d_trg)
                gen = generator(data, s_trg)
                generated.append(gen)
        generated = torch.cat(generated)
        generated = normalize(generated)

        trg_data = [batch.to(device) for batch in trg]
        trg_data = torch.cat(trg_data)
        trg_data = normalize(trg_data)

    computed_fid = fid.calculate_fid(trg_data, generated, 512, device, 2048)
    print(f'FID: {computed_fid}')
    save_result(save_path, args.identifier, state_dict_path, computed_fid)