Example #1
opt.lam_PAPR = 0.5        # Weight for the PAPR (peak-to-average power ratio) reduction term
opt.is_regu_sigma = False # Whether to add a regularization term on sigma
opt.lam_sigma = 100       # Weight for the sigma regularization term
##############################################################################################################
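# For reference, a minimal sketch of how these weights might enter the total
# training objective -- `recon_loss`, `papr_loss`, and `sigma_loss` below are
# hypothetical stand-ins for terms computed in the actual training loop:
#
#   total_loss = recon_loss + opt.lam_PAPR * papr_loss
#   if opt.is_regu_sigma:
#       total_loss = total_loss + opt.lam_sigma * sigma_loss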

# Set up the loss function
opt.lambda_L2 = 128       # Weight for the L2 reconstruction loss
opt.is_Feat = False       # Whether to use the feature matching loss
opt.lambda_feat = 1       # Weight for the feature matching loss
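# Sketch of how these flags are typically combined into the generator loss
# (`fake`, `real`, and `feat_loss` are hypothetical stand-ins, and F is
# torch.nn.functional):
#
#   loss_G = opt.lambda_L2 * F.mse_loss(fake, real)
#   if opt.is_Feat:
#       loss_G = loss_G + opt.lambda_feat * feat_loss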

if opt.gan_mode == 'wgangp':
    opt.norm_D = 'instance'   # Use instance normalization with WGAN-GP, since batch norm interferes with the per-sample gradient penalty.  Available: 'instance', 'batch', 'none'
else:
    opt.norm_D = 'batch'      # Use batch normalization otherwise

opt.activation = 'sigmoid'    # Output activation function at the last layer of the decoder
opt.norm_EG = 'batch'         # Normalization used in the encoder/generator networks
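
# The norm flags above are usually mapped to layer constructors by a small
# helper. The pix2pix-style sketch below is an assumption about how this
# codebase resolves them, not its actual helper:
import functools
import torch.nn as nn

def get_norm_layer(norm_type):
    """Map a norm flag ('batch' | 'instance' | 'none') to a layer constructor."""
    if norm_type == 'batch':
        return functools.partial(nn.BatchNorm2d, affine=True, track_running_stats=True)
    if norm_type == 'instance':
        return functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)
    if norm_type == 'none':
        return lambda num_features: nn.Identity()
    raise NotImplementedError(f'normalization layer [{norm_type}] is not found')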

import torch
import torchvision
import torchvision.transforms as transforms

if opt.dataset_mode == 'CIFAR10':
    opt.dataroot = './data'
    opt.size = 32
    transform = transforms.Compose(
        [transforms.RandomHorizontalFlip(p=0.5),
         transforms.RandomCrop(opt.size, padding=5, pad_if_needed=True, fill=0, padding_mode='reflect'),
         transforms.ToTensor(),
         transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

    trainset = torchvision.datasets.CIFAR10(root=opt.dataroot, train=True,
                                            download=True, transform=transform)
    dataset = torch.utils.data.DataLoader(trainset, batch_size=opt.batchSize,
                                          shuffle=True, num_workers=2, drop_last=True)
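
    # Quick sanity check on the loader (runs only in the CIFAR10 branch above):
    # one batch should have shape [opt.batchSize, 3, 32, 32], with pixel values
    # scaled to roughly [-1, 1] by the Normalize transform.
    images, labels = next(iter(dataset))
    print(images.shape, images.min().item(), images.max().item())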