Example #1
opt.lr = 5e-4  # Initial learning rate
opt.lr_policy = 'linear'  # Learning-rate decay policy; see options/train_options.py for the available choices
opt.beta1 = 0.5  # Beta1 hyperparameter for the Adam optimizer
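# The options above are typically consumed when the optimizer and its
# scheduler are built.  A minimal sketch, assuming a generator stand-in and
# illustrative schedule lengths; neither is defined in this snippet.
import torch
import torch.nn as nn

netG = nn.Linear(1, 1)               # stand-in for the real generator (assumption)
n_epochs, n_epochs_decay = 100, 100  # illustrative schedule lengths (assumptions)

optimizer_G = torch.optim.Adam(netG.parameters(), lr=opt.lr,
                               betas=(opt.beta1, 0.999))

def linear_decay(epoch):
    # 'linear' policy: hold the rate for n_epochs, then decay linearly to zero.
    return 1.0 - max(0, epoch - n_epochs) / float(n_epochs_decay + 1)

scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer_G, lr_lambda=linear_decay)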

# Set up the loss function
opt.is_Feat = False  # Whether to use a feature matching loss
opt.lambda_feat = 1  # Weight of the feature matching loss (has no effect while is_Feat is False)
opt.gan_mode = 'wgangp'  # GAN objective: WGAN with gradient penalty
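# With gan_mode == 'wgangp', the discriminator loss adds a gradient penalty on
# interpolates between real and fake samples (Gulrajani et al., 2017).  A
# minimal sketch of that term; netD, real, and fake are placeholders, not
# names defined in this snippet.
import torch

def gradient_penalty(netD, real, fake):
    alpha = torch.rand(real.size(0), 1, 1, 1, device=real.device)
    interp = (alpha * real + (1 - alpha) * fake).requires_grad_(True)
    d_interp = netD(interp)
    grads = torch.autograd.grad(outputs=d_interp, inputs=interp,
                                grad_outputs=torch.ones_like(d_interp),
                                create_graph=True, retain_graph=True)[0]
    grads = grads.view(grads.size(0), -1)
    return ((grads.norm(2, dim=1) - 1) ** 2).mean()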

##############################################################################################################
if opt.gan_mode == 'wgangp':
    opt.norm_D = 'instance'  # WGAN-GP penalizes per-sample gradients, so avoid batch norm in the discriminator.  Available: 'instance', 'batch', 'none'
else:
    opt.norm_D = 'batch'  # Use batch normalization otherwise

opt.activation_D = 'LeakyReLU'  # Activation function used in the discriminator
opt.activation_G = 'ReLU'  # Output activation at the last layer of the generator's decoder
opt.norm_G = 'batch'  # Normalization layer used in the generator
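# These option strings are presumably mapped to layer constructors when the
# networks are built.  A minimal sketch of such a mapping; get_activation is a
# hypothetical helper, not part of this snippet's codebase.
import torch.nn as nn

def get_activation(name):
    if name == 'LeakyReLU':
        return nn.LeakyReLU(0.2, inplace=True)  # common slope for discriminators
    if name == 'ReLU':
        return nn.ReLU(inplace=True)
    raise ValueError(f'Unknown activation: {name}')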

import torchvision
import torchvision.transforms as transforms

if opt.dataset_mode == 'CIFAR10':
    opt.dataroot = './data'  # Root directory for the CIFAR-10 download

    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])

    trainset = torchvision.datasets.CIFAR10(root=opt.dataroot,
                                            train=True,
                                            download=True,
                                            transform=transform)
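
    # The dataset plugs into a standard DataLoader; the batch size and worker
    # count below are illustrative values, not taken from opt.
    import torch

    trainloader = torch.utils.data.DataLoader(trainset, batch_size=64,
                                              shuffle=True, num_workers=2)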