# NOTE(review): this physical line is a whitespace-collapsed paste of many
# statements from a dataset-selection if/elif chain — the original newlines
# were lost. It opens with a dangling `)` that closes a DataLoader call begun
# BEFORE this chunk, and it ends mid-call inside
# `datasets.Imagenet64(train=True, transform=transforms.Compose([`.
# Restore the real line breaks from version control; do not edit tokens here.
#
# Visible logic (per `args.data` branch — celeba_5bit tail, imagenet32,
# imagenet64 head):
#   - im_dim = 3 (RGB channels);
#   - init_layer = layers.LogitTransform(0.05) — logit transform with
#     alpha=0.05 as the first layer (presumably maps [0,1] pixels to logit
#     space; semantics live in the project's `layers` module — confirm there);
#   - force args.imagesize to the dataset's native resolution (32 or 64),
#     logging the change;
#   - build a shuffled train DataLoader (batch_size=args.batchsize) and an
#     unshuffled test DataLoader (batch_size=args.val_batchsize), both with
#     num_workers=args.nworkers, whose transforms apply `add_noise`
#     (dequantization noise; nvals=32 for the 5-bit CelebA data since
#     5 bits -> 32 levels).
# NOTE(review): the content of this line overlaps/duplicates the next physical
# line with different wrapping — likely an extraction artifact; reconcile
# against the canonical file before deduplicating.
), batch_size=args.batchsize, shuffle=True, num_workers=args.nworkers ) test_loader = torch.utils.data.DataLoader( datasets.CelebA5bit(train=False, transform=transforms.Compose([ lambda x: add_noise(x, nvals=32), ])), batch_size=args.val_batchsize, shuffle=False, num_workers=args.nworkers ) elif args.data == 'imagenet32': im_dim = 3 init_layer = layers.LogitTransform(0.05) if args.imagesize != 32: logger.info('Changing image size to 32.') args.imagesize = 32 train_loader = torch.utils.data.DataLoader( datasets.Imagenet32(train=True, transform=transforms.Compose([ add_noise, ])), batch_size=args.batchsize, shuffle=True, num_workers=args.nworkers ) test_loader = torch.utils.data.DataLoader( datasets.Imagenet32(train=False, transform=transforms.Compose([ add_noise, ])), batch_size=args.val_batchsize, shuffle=False, num_workers=args.nworkers ) elif args.data == 'imagenet64': im_dim = 3 init_layer = layers.LogitTransform(0.05) if args.imagesize != 64: logger.info('Changing image size to 64.') args.imagesize = 64 train_loader = torch.utils.data.DataLoader( datasets.Imagenet64(train=True, transform=transforms.Compose([
# NOTE(review): this physical line is a second, differently-wrapped copy of the
# same whitespace-collapsed region as the previous line (celeba_5bit test
# loader, full imagenet32 branch, head of imagenet64 branch) — almost certainly
# a duplication artifact of whatever mangled this file. It ends at a bodiless
# `if args.imagesize != 64:` whose suite lies outside the visible chunk.
# Restore real line breaks and deduplicate against the canonical source before
# touching any tokens here.
#
# Visible logic is identical to the copy above: per-dataset branches set
# im_dim=3, init_layer=layers.LogitTransform(0.05), coerce args.imagesize to
# the native resolution with a log message, and construct shuffled-train /
# unshuffled-test DataLoaders with `add_noise` dequantization transforms
# (nvals=32 for 5-bit CelebA — 2**5 quantization levels).
test_loader = torch.utils.data.DataLoader(datasets.CelebA5bit( train=False, transform=transforms.Compose([ lambda x: add_noise(x, nvals=32), ])), batch_size=args.val_batchsize, shuffle=False, num_workers=args.nworkers) elif args.data == 'imagenet32': im_dim = 3 init_layer = layers.LogitTransform(0.05) if args.imagesize != 32: logger.info('Changing image size to 32.') args.imagesize = 32 train_loader = torch.utils.data.DataLoader(datasets.Imagenet32( train=True, transform=transforms.Compose([ add_noise, ])), batch_size=args.batchsize, shuffle=True, num_workers=args.nworkers) test_loader = torch.utils.data.DataLoader(datasets.Imagenet32( train=False, transform=transforms.Compose([ add_noise, ])), batch_size=args.val_batchsize, shuffle=False, num_workers=args.nworkers) elif args.data == 'imagenet64': im_dim = 3 init_layer = layers.LogitTransform(0.05) if args.imagesize != 64: