dset = VideoFolderDataset(args.path, transform, mode='image', cache=args.cache) elif args.dataset == 'imagefolder': transform = transforms.Compose([ transforms.RandomHorizontalFlip(p=0.5 if args.flip else 0), transforms.Resize(args.size, Image.LANCZOS), transforms.CenterCrop(args.size), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)), ]) dset = datasets.ImageFolder(args.path, transform=transform) else: dset = get_image_dataset(args, args.dataset, args.path, train=args.eval_type == 'train') # args.n_sample = min(args.n_sample, len(dset)) indices = torch.randperm(len(dset))[:args.n_sample] dset = Subset(dset, indices) loader = DataLoader(dset, batch_size=args.batch, num_workers=4, shuffle=True) features = extract_features(loader, inception, device).numpy() # features = features[: args.n_sample] print(f"extracted {features.shape[0]} features")
# NOTE(review): this fragment starts mid checkpoint-restore — the
# `d_optim.load_state_dict` call is presumably inside a `if args.ckpt ...`
# resume branch whose header is above this excerpt; confirm against the
# full file.
d_optim.load_state_dict(ckpt["d_optim"])

if args.distributed:
    # Wrap both networks for multi-GPU training; broadcast_buffers=False
    # avoids syncing running stats across ranks every forward pass.
    generator = nn.parallel.DistributedDataParallel(
        generator,
        device_ids=[args.local_rank],
        output_device=args.local_rank,
        broadcast_buffers=False,
    )
    discriminator = nn.parallel.DistributedDataParallel(
        discriminator,
        device_ids=[args.local_rank],
        output_device=args.local_rank,
        broadcast_buffers=False,
    )

dataset = get_image_dataset(args, args.dataset, args.path, train=True)
# data_sampler picks a DistributedSampler vs. Random/Sequential sampler
# depending on args.distributed; drop_last keeps batch sizes uniform.
loader = data.DataLoader(
    dataset,
    batch_size=args.batch,
    sampler=data_sampler(dataset, shuffle=True, distributed=args.distributed),
    drop_last=True,
)

# Only rank 0 logs to Weights & Biases, and only when the package imported
# successfully and logging was requested.
if get_rank() == 0 and wandb is not None and args.wandb:
    wandb.init(project=args.name)

util.print_models([generator, discriminator], args)

train(args, loader, generator, discriminator, g_optim, d_optim, g_ema, device)