# Dimension of the latent vector
_C.dimLatentVector = 512

# Should the biases be initialized to zero?
_C.initBiasToZero = True

# Per-channel normalization
_C.perChannelNormalization = True

# Loss mode
_C.lossMode = 'WGANGP'
_C.ac_gan = False

# Gradient penalty coefficient (WGANGP)
_C.lambdaGP = 10.

# Leakiness (negative slope) of the LeakyReLU activation function
_C.leakyness = 0.2

# Weight of the penalty on |D(x)|^2
_C.epsilonD = 0.001

# Mini-batch standard deviation regularization
_C.miniBatchStdDev = True

# Base learning rate
_C.learning_rate = 0.001

# RGB or grayscale output?
_C.dimOutput = 3
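# Illustration (not part of the configuration): a minimal sketch of how
# lambdaGP and epsilonD enter the WGAN-GP critic objective, assuming a PyTorch
# discriminator `netD` and batches of real and generated images. The function
# name below is made up for this example and is not taken from the repository.
import torch

def wgangp_penalties(netD, real, fake, lambdaGP=10., epsilonD=0.001):
    batch_size = real.size(0)
    # Interpolate randomly between real and generated samples
    alpha = torch.rand(batch_size, 1, 1, 1, device=real.device)
    interp = alpha * real.detach() + (1. - alpha) * fake.detach()
    interp.requires_grad_(True)
    # Gradient penalty: push the gradient norm of D at the interpolates to 1,
    # weighted by lambdaGP
    grads = torch.autograd.grad(outputs=netD(interp).sum(), inputs=interp,
                                create_graph=True)[0]
    grad_norm = grads.reshape(batch_size, -1).norm(2, dim=1)
    gradient_penalty = lambdaGP * ((grad_norm - 1.) ** 2).mean()
    # Drift penalty: small weight epsilonD on |D(x)|^2 to keep the critic
    # output from drifting
    drift_penalty = epsilonD * (netD(real) ** 2).mean()
    return gradient_penalty + drift_penalty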
# Size (height and width) of the generated images
_C.imageSize = 64

# Learning rate for the optimizers
# _C.learningRate = 0.0002

# Base learning rate
_C.baseLearningRate = 0.0002

# Beta1 hyperparameter of the Adam optimizers
_C.beta1 = 0.5

# Loss mode
_C.lossMode = 'DCGAN'

# Gradient penalty coefficient (WGANGP)
_C.lambdaGP = 0.

# Noise standard deviation for instance noise (0 <=> no instance noise)
_C.sigmaNoise = 0.

# Weight of the penalty on |D(x)|^2
_C.epsilonD = 0.

# In the AC-GAN case, weight of the classification loss (per scale)
_C.weightConditionG = 0.0
_C.weightConditionD = 0.0

# Activate the GDPP loss?
_C.GDPP = False

# Number of epochs
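# Illustration (not part of the configuration): a minimal sketch of instance
# noise, assuming PyTorch image tensors; the helper name is made up for this
# example. With sigmaNoise > 0, Gaussian noise of that standard deviation is
# added to every batch fed to the discriminator (real and generated alike),
# which smooths both distributions early in training; sigmaNoise = 0 leaves
# the images unchanged.
import torch

def add_instance_noise(images, sigmaNoise=0.):
    if sigmaNoise <= 0:
        return images
    return images + sigmaNoise * torch.randn_like(images)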